1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55
56 /* Specify which cpu to schedule for. */
57
58 enum processor_type alpha_cpu;
59 static const char * const alpha_cpu_name[] =
60 {
61 "ev4", "ev5", "ev6"
62 };
63
64 /* Specify how accurate floating-point traps need to be. */
65
66 enum alpha_trap_precision alpha_tp;
67
68 /* Specify the floating-point rounding mode. */
69
70 enum alpha_fp_rounding_mode alpha_fprm;
71
72 /* Specify which things cause traps. */
73
74 enum alpha_fp_trap_mode alpha_fptm;
75
76 /* Specify bit size of immediate TLS offsets. */
77
78 int alpha_tls_size = 32;
79
80 /* Strings decoded into the above options. */
81
82 const char *alpha_cpu_string; /* -mcpu= */
83 const char *alpha_tune_string; /* -mtune= */
84 const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
85 const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
86 const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
87 const char *alpha_mlat_string; /* -mmemory-latency= */
88 const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */
89
90 /* Save information from a "cmpxx" operation until the branch or scc is
91 emitted. */
92
93 struct alpha_compare alpha_compare;
94
95 /* Nonzero if inside of a function, because the Alpha asm can't
96 handle .files inside of functions. */
97
98 static int inside_function = FALSE;
99
100 /* The number of cycles of latency we should assume on memory reads. */
101
102 int alpha_memory_latency = 3;
103
104 /* Whether the function needs the GP. */
105
106 static int alpha_function_needs_gp;
107
108 /* The alias set for prologue/epilogue register save/restore. */
109
110 static GTY(()) int alpha_sr_alias_set;
111
112 /* The assembler name of the current function. */
113
114 static const char *alpha_fnname;
115
116 /* The next explicit relocation sequence number. */
117 extern GTY(()) int alpha_next_sequence_number;
118 int alpha_next_sequence_number = 1;
119
120 /* The literal and gpdisp sequence numbers for this insn, as printed
121 by %# and %* respectively. */
122 extern GTY(()) int alpha_this_literal_sequence_number;
123 extern GTY(()) int alpha_this_gpdisp_sequence_number;
124 int alpha_this_literal_sequence_number;
125 int alpha_this_gpdisp_sequence_number;
126
127 /* Costs of various operations on the different architectures. */
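/* The entries below are expressed with COSTS_N_INSNS, i.e. roughly in
   units of a single typical instruction; they are consumed later in
   this file when computing RTX costs for the combiner and scheduler.  */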
128
129 struct alpha_rtx_cost_data
130 {
131 unsigned char fp_add;
132 unsigned char fp_mult;
133 unsigned char fp_div_sf;
134 unsigned char fp_div_df;
135 unsigned char int_mult_si;
136 unsigned char int_mult_di;
137 unsigned char int_shift;
138 unsigned char int_cmov;
139 };
140
141 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
142 {
143 { /* EV4 */
144 COSTS_N_INSNS (6), /* fp_add */
145 COSTS_N_INSNS (6), /* fp_mult */
146 COSTS_N_INSNS (34), /* fp_div_sf */
147 COSTS_N_INSNS (63), /* fp_div_df */
148 COSTS_N_INSNS (23), /* int_mult_si */
149 COSTS_N_INSNS (23), /* int_mult_di */
150 COSTS_N_INSNS (2), /* int_shift */
151 COSTS_N_INSNS (2), /* int_cmov */
152 },
153 { /* EV5 */
154 COSTS_N_INSNS (4), /* fp_add */
155 COSTS_N_INSNS (4), /* fp_mult */
156 COSTS_N_INSNS (15), /* fp_div_sf */
157 COSTS_N_INSNS (22), /* fp_div_df */
158 COSTS_N_INSNS (8), /* int_mult_si */
159 COSTS_N_INSNS (12), /* int_mult_di */
160 COSTS_N_INSNS (1) + 1, /* int_shift */
161 COSTS_N_INSNS (1), /* int_cmov */
162 },
163 { /* EV6 */
164 COSTS_N_INSNS (4), /* fp_add */
165 COSTS_N_INSNS (4), /* fp_mult */
166 COSTS_N_INSNS (12), /* fp_div_sf */
167 COSTS_N_INSNS (15), /* fp_div_df */
168 COSTS_N_INSNS (7), /* int_mult_si */
169 COSTS_N_INSNS (7), /* int_mult_di */
170 COSTS_N_INSNS (1), /* int_shift */
171 COSTS_N_INSNS (2), /* int_cmov */
172 },
173 };
174
175 /* Get the number of args of a function in one of two ways. */
176 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
177 #define NUM_ARGS current_function_args_info.num_args
178 #else
179 #define NUM_ARGS current_function_args_info
180 #endif
181
182 #define REG_PV 27
183 #define REG_RA 26
184
185 /* Declarations of static functions. */
186 static struct machine_function *alpha_init_machine_status (void);
187 static rtx alpha_emit_xfloating_compare (enum rtx_code, rtx, rtx);
188
189 #if TARGET_ABI_OPEN_VMS
190 static void alpha_write_linkage (FILE *, const char *, tree);
191 #endif
192
193 static void unicosmk_output_deferred_case_vectors (FILE *);
194 static void unicosmk_gen_dsib (unsigned long *);
195 static void unicosmk_output_ssib (FILE *, const char *);
196 static int unicosmk_need_dex (rtx);
197 \f
198 /* Parse target option strings. */
199
200 void
201 override_options (void)
202 {
203 int i;
204 static const struct cpu_table {
205 const char *const name;
206 const enum processor_type processor;
207 const int flags;
208 } cpu_table[] = {
209 #define EV5_MASK (MASK_CPU_EV5)
210 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
211 { "ev4", PROCESSOR_EV4, 0 },
212 { "ev45", PROCESSOR_EV4, 0 },
213 { "21064", PROCESSOR_EV4, 0 },
214 { "ev5", PROCESSOR_EV5, EV5_MASK },
215 { "21164", PROCESSOR_EV5, EV5_MASK },
216 { "ev56", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
217 { "21164a", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
218 { "pca56", PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
219 { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
220 { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
221 { "ev6", PROCESSOR_EV6, EV6_MASK },
222 { "21264", PROCESSOR_EV6, EV6_MASK },
223 { "ev67", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
224 { "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
225 { 0, 0, 0 }
226 };
227
228 /* Unicos/Mk doesn't have shared libraries. */
229 if (TARGET_ABI_UNICOSMK && flag_pic)
230 {
231 warning ("-f%s ignored for Unicos/Mk (not supported)",
232 (flag_pic > 1) ? "PIC" : "pic");
233 flag_pic = 0;
234 }
235
 236 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
237 floating-point instructions. Make that the default for this target. */
238 if (TARGET_ABI_UNICOSMK)
239 alpha_fprm = ALPHA_FPRM_DYN;
240 else
241 alpha_fprm = ALPHA_FPRM_NORM;
242
243 alpha_tp = ALPHA_TP_PROG;
244 alpha_fptm = ALPHA_FPTM_N;
245
246 /* We cannot use su and sui qualifiers for conversion instructions on
247 Unicos/Mk. I'm not sure if this is due to assembler or hardware
248 limitations. Right now, we issue a warning if -mieee is specified
249 and then ignore it; eventually, we should either get it right or
250 disable the option altogether. */
251
252 if (TARGET_IEEE)
253 {
254 if (TARGET_ABI_UNICOSMK)
255 warning ("-mieee not supported on Unicos/Mk");
256 else
257 {
258 alpha_tp = ALPHA_TP_INSN;
259 alpha_fptm = ALPHA_FPTM_SU;
260 }
261 }
262
263 if (TARGET_IEEE_WITH_INEXACT)
264 {
265 if (TARGET_ABI_UNICOSMK)
266 warning ("-mieee-with-inexact not supported on Unicos/Mk");
267 else
268 {
269 alpha_tp = ALPHA_TP_INSN;
270 alpha_fptm = ALPHA_FPTM_SUI;
271 }
272 }
273
274 if (alpha_tp_string)
275 {
276 if (! strcmp (alpha_tp_string, "p"))
277 alpha_tp = ALPHA_TP_PROG;
278 else if (! strcmp (alpha_tp_string, "f"))
279 alpha_tp = ALPHA_TP_FUNC;
280 else if (! strcmp (alpha_tp_string, "i"))
281 alpha_tp = ALPHA_TP_INSN;
282 else
283 error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
284 }
285
286 if (alpha_fprm_string)
287 {
288 if (! strcmp (alpha_fprm_string, "n"))
289 alpha_fprm = ALPHA_FPRM_NORM;
290 else if (! strcmp (alpha_fprm_string, "m"))
291 alpha_fprm = ALPHA_FPRM_MINF;
292 else if (! strcmp (alpha_fprm_string, "c"))
293 alpha_fprm = ALPHA_FPRM_CHOP;
294 else if (! strcmp (alpha_fprm_string,"d"))
295 alpha_fprm = ALPHA_FPRM_DYN;
296 else
297 error ("bad value `%s' for -mfp-rounding-mode switch",
298 alpha_fprm_string);
299 }
300
301 if (alpha_fptm_string)
302 {
303 if (strcmp (alpha_fptm_string, "n") == 0)
304 alpha_fptm = ALPHA_FPTM_N;
305 else if (strcmp (alpha_fptm_string, "u") == 0)
306 alpha_fptm = ALPHA_FPTM_U;
307 else if (strcmp (alpha_fptm_string, "su") == 0)
308 alpha_fptm = ALPHA_FPTM_SU;
309 else if (strcmp (alpha_fptm_string, "sui") == 0)
310 alpha_fptm = ALPHA_FPTM_SUI;
311 else
312 error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
313 }
314
315 if (alpha_tls_size_string)
316 {
317 if (strcmp (alpha_tls_size_string, "16") == 0)
318 alpha_tls_size = 16;
319 else if (strcmp (alpha_tls_size_string, "32") == 0)
320 alpha_tls_size = 32;
321 else if (strcmp (alpha_tls_size_string, "64") == 0)
322 alpha_tls_size = 64;
323 else
324 error ("bad value `%s' for -mtls-size switch", alpha_tls_size_string);
325 }
326
327 alpha_cpu
328 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
329 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
330
331 if (alpha_cpu_string)
332 {
333 for (i = 0; cpu_table [i].name; i++)
334 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
335 {
336 alpha_cpu = cpu_table [i].processor;
337 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
338 | MASK_CPU_EV5 | MASK_CPU_EV6);
339 target_flags |= cpu_table [i].flags;
340 break;
341 }
342 if (! cpu_table [i].name)
343 error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
344 }
345
346 if (alpha_tune_string)
347 {
348 for (i = 0; cpu_table [i].name; i++)
349 if (! strcmp (alpha_tune_string, cpu_table [i].name))
350 {
351 alpha_cpu = cpu_table [i].processor;
352 break;
353 }
354 if (! cpu_table [i].name)
355 error ("bad value `%s' for -mcpu switch", alpha_tune_string);
356 }
357
358 /* Do some sanity checks on the above options. */
359
360 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
361 {
362 warning ("trap mode not supported on Unicos/Mk");
363 alpha_fptm = ALPHA_FPTM_N;
364 }
365
366 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
367 && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
368 {
369 warning ("fp software completion requires -mtrap-precision=i");
370 alpha_tp = ALPHA_TP_INSN;
371 }
372
373 if (TARGET_CPU_EV6)
374 {
375 /* Except for EV6 pass 1 (not released), we always have precise
376 arithmetic traps. Which means we can do software completion
377 without minding trap shadows. */
378 alpha_tp = ALPHA_TP_PROG;
379 }
380
381 if (TARGET_FLOAT_VAX)
382 {
383 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
384 {
385 warning ("rounding mode not supported for VAX floats");
386 alpha_fprm = ALPHA_FPRM_NORM;
387 }
388 if (alpha_fptm == ALPHA_FPTM_SUI)
389 {
390 warning ("trap mode not supported for VAX floats");
391 alpha_fptm = ALPHA_FPTM_SU;
392 }
393 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
394 warning ("128-bit long double not supported for VAX floats");
395 target_flags &= ~MASK_LONG_DOUBLE_128;
396 }
397
398 {
399 char *end;
400 int lat;
401
402 if (!alpha_mlat_string)
403 alpha_mlat_string = "L1";
404
405 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
406 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
407 ;
408 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
409 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
410 && alpha_mlat_string[2] == '\0')
411 {
412 static int const cache_latency[][4] =
413 {
414 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
415 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
416 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
417 };
418
419 lat = alpha_mlat_string[1] - '0';
420 if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
421 {
422 warning ("L%d cache latency unknown for %s",
423 lat, alpha_cpu_name[alpha_cpu]);
424 lat = 3;
425 }
426 else
427 lat = cache_latency[alpha_cpu][lat-1];
428 }
429 else if (! strcmp (alpha_mlat_string, "main"))
430 {
431 /* Most current memories have about 370ns latency. This is
432 a reasonable guess for a fast cpu. */
433 lat = 150;
434 }
435 else
436 {
437 warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
438 lat = 3;
439 }
440
441 alpha_memory_latency = lat;
442 }
443
444 /* Default the definition of "small data" to 8 bytes. */
445 if (!g_switch_set)
446 g_switch_value = 8;
447
448 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
449 if (flag_pic == 1)
450 target_flags |= MASK_SMALL_DATA;
451 else if (flag_pic == 2)
452 target_flags &= ~MASK_SMALL_DATA;
453
454 /* Align labels and loops for optimal branching. */
455 /* ??? Kludge these by not doing anything if we don't optimize and also if
456 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
457 if (optimize > 0 && write_symbols != SDB_DEBUG)
458 {
459 if (align_loops <= 0)
460 align_loops = 16;
461 if (align_jumps <= 0)
462 align_jumps = 16;
463 }
464 if (align_functions <= 0)
465 align_functions = 16;
466
467 /* Acquire a unique set number for our register saves and restores. */
468 alpha_sr_alias_set = new_alias_set ();
469
470 /* Register variables and functions with the garbage collector. */
471
472 /* Set up function hooks. */
473 init_machine_status = alpha_init_machine_status;
474
475 /* Tell the compiler when we're using VAX floating point. */
476 if (TARGET_FLOAT_VAX)
477 {
478 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
479 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
480 REAL_MODE_FORMAT (TFmode) = NULL;
481 }
482 }
483 \f
484 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
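/* For instance, 0x00000000ffffffff and -1 qualify (every byte is 0x00
   or 0xff), while 0x123 does not, since its low byte 0x23 is neither.  */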
485
486 int
487 zap_mask (HOST_WIDE_INT value)
488 {
489 int i;
490
491 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
492 i++, value >>= 8)
493 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
494 return 0;
495
496 return 1;
497 }
498
499 /* Returns 1 if OP is either the constant zero or a register. If a
500 register, it must be in the proper mode unless MODE is VOIDmode. */
501
502 int
503 reg_or_0_operand (rtx op, enum machine_mode mode)
504 {
505 return op == CONST0_RTX (mode) || register_operand (op, mode);
506 }
507
508 /* Return 1 if OP is a constant in the range of 0-63 (for a shift) or
509 any register. */
510
511 int
512 reg_or_6bit_operand (rtx op, enum machine_mode mode)
513 {
514 return ((GET_CODE (op) == CONST_INT
515 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
516 || register_operand (op, mode));
517 }
518
519
520 /* Return 1 if OP is an 8-bit constant or any register. */
521
522 int
523 reg_or_8bit_operand (rtx op, enum machine_mode mode)
524 {
525 return ((GET_CODE (op) == CONST_INT
526 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
527 || register_operand (op, mode));
528 }
529
530 /* Return 1 if OP is a constant or any register. */
531
532 int
533 reg_or_const_int_operand (rtx op, enum machine_mode mode)
534 {
535 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
536 }
537
538 /* Return 1 if OP is an 8-bit constant. */
539
540 int
541 cint8_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
542 {
543 return ((GET_CODE (op) == CONST_INT
544 && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100));
545 }
546
547 /* Return 1 if the operand is a valid second operand to an add insn. */
548
549 int
550 add_operand (rtx op, enum machine_mode mode)
551 {
552 if (GET_CODE (op) == CONST_INT)
553 /* Constraints I, J, O and P are covered by K. */
554 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
555 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
556
557 return register_operand (op, mode);
558 }
559
560 /* Return 1 if the operand is a valid second operand to a sign-extending
561 add insn. */
562
563 int
564 sext_add_operand (rtx op, enum machine_mode mode)
565 {
566 if (GET_CODE (op) == CONST_INT)
567 return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
568 || CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
569
570 return reg_not_elim_operand (op, mode);
571 }
572
573 /* Return 1 if OP is the constant 4 or 8. */
574
575 int
576 const48_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
577 {
578 return (GET_CODE (op) == CONST_INT
579 && (INTVAL (op) == 4 || INTVAL (op) == 8));
580 }
581
582 /* Return 1 if OP is a valid first operand to an AND insn. */
583
584 int
585 and_operand (rtx op, enum machine_mode mode)
586 {
587 if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == VOIDmode)
588 return (zap_mask (CONST_DOUBLE_LOW (op))
589 && zap_mask (CONST_DOUBLE_HIGH (op)));
590
591 if (GET_CODE (op) == CONST_INT)
592 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
593 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
594 || zap_mask (INTVAL (op)));
595
596 return register_operand (op, mode);
597 }
598
599 /* Return 1 if OP is a valid first operand to an IOR or XOR insn. */
600
601 int
602 or_operand (rtx op, enum machine_mode mode)
603 {
604 if (GET_CODE (op) == CONST_INT)
605 return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
606 || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
607
608 return register_operand (op, mode);
609 }
610
611 /* Return 1 if OP is a constant that is the width, in bits, of an integral
 612 mode no wider than DImode. */
613
614 int
615 mode_width_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
616 {
617 return (GET_CODE (op) == CONST_INT
618 && (INTVAL (op) == 8 || INTVAL (op) == 16
619 || INTVAL (op) == 32 || INTVAL (op) == 64));
620 }
621
 622 /* Return 1 if OP is a constant that is the mask of an integral machine mode
 623 no wider than DImode (0xff, 0xffff, 0xffffffff or -1). */
624
625 int
626 mode_mask_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
627 {
628 if (GET_CODE (op) == CONST_INT)
629 {
630 HOST_WIDE_INT value = INTVAL (op);
631
632 if (value == 0xff)
633 return 1;
634 if (value == 0xffff)
635 return 1;
636 if (value == 0xffffffff)
637 return 1;
638 if (value == -1)
639 return 1;
640 }
641 else if (HOST_BITS_PER_WIDE_INT == 32 && GET_CODE (op) == CONST_DOUBLE)
642 {
643 if (CONST_DOUBLE_LOW (op) == 0xffffffff && CONST_DOUBLE_HIGH (op) == 0)
644 return 1;
645 }
646
647 return 0;
648 }
649
650 /* Return 1 if OP is a multiple of 8 less than 64. */
651
652 int
653 mul8_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
654 {
655 return (GET_CODE (op) == CONST_INT
656 && (unsigned HOST_WIDE_INT) INTVAL (op) < 64
657 && (INTVAL (op) & 7) == 0);
658 }
659
660 /* Return 1 if OP is the zero constant for MODE. */
661
662 int
663 const0_operand (rtx op, enum machine_mode mode)
664 {
665 return op == CONST0_RTX (mode);
666 }
667
668 /* Return 1 if OP is a hard floating-point register. */
669
670 int
671 hard_fp_register_operand (rtx op, enum machine_mode mode)
672 {
673 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
674 return 0;
675
676 if (GET_CODE (op) == SUBREG)
677 op = SUBREG_REG (op);
678 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS;
679 }
680
681 /* Return 1 if OP is a hard general register. */
682
683 int
684 hard_int_register_operand (rtx op, enum machine_mode mode)
685 {
686 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
687 return 0;
688
689 if (GET_CODE (op) == SUBREG)
690 op = SUBREG_REG (op);
691 return GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS;
692 }
693
694 /* Return 1 if OP is a register or a constant integer. */
695
696
697 int
698 reg_or_cint_operand (rtx op, enum machine_mode mode)
699 {
700 return (GET_CODE (op) == CONST_INT
701 || register_operand (op, mode));
702 }
703
704 /* Return 1 if OP is something that can be reloaded into a register;
705 if it is a MEM, it need not be valid. */
706
707 int
708 some_operand (rtx op, enum machine_mode mode)
709 {
710 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
711 return 0;
712
713 switch (GET_CODE (op))
714 {
715 case REG:
716 case MEM:
717 case CONST_INT:
718 case CONST_DOUBLE:
719 case CONST_VECTOR:
720 case LABEL_REF:
721 case SYMBOL_REF:
722 case CONST:
723 case HIGH:
724 return 1;
725
726 case SUBREG:
727 return some_operand (SUBREG_REG (op), VOIDmode);
728
729 default:
730 break;
731 }
732
733 return 0;
734 }
735
736 /* Likewise, but don't accept constants. */
737
738 int
739 some_ni_operand (rtx op, enum machine_mode mode)
740 {
741 if (GET_MODE (op) != mode && mode != VOIDmode)
742 return 0;
743
744 if (GET_CODE (op) == SUBREG)
745 op = SUBREG_REG (op);
746
747 return (GET_CODE (op) == REG || GET_CODE (op) == MEM);
748 }
749
750 /* Return 1 if OP is a valid operand for the source of a move insn. */
751
752 int
753 input_operand (rtx op, enum machine_mode mode)
754 {
755 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
756 return 0;
757
758 if (GET_MODE_CLASS (mode) == MODE_FLOAT && GET_MODE (op) != mode)
759 return 0;
760
761 switch (GET_CODE (op))
762 {
763 case LABEL_REF:
764 case SYMBOL_REF:
765 case CONST:
766 if (TARGET_EXPLICIT_RELOCS)
767 {
 768 /* We don't split symbolic operands into something unintelligible
769 until after reload, but we do not wish non-small, non-global
770 symbolic operands to be reconstructed from their high/lo_sum
771 form. */
772 return (small_symbolic_operand (op, mode)
773 || global_symbolic_operand (op, mode)
774 || gotdtp_symbolic_operand (op, mode)
775 || gottp_symbolic_operand (op, mode));
776 }
777
778 /* This handles both the Windows/NT and OSF cases. */
779 return mode == ptr_mode || mode == DImode;
780
781 case HIGH:
782 return (TARGET_EXPLICIT_RELOCS
783 && local_symbolic_operand (XEXP (op, 0), mode));
784
785 case REG:
786 case ADDRESSOF:
787 return 1;
788
789 case SUBREG:
790 if (register_operand (op, mode))
791 return 1;
792 /* ... fall through ... */
793 case MEM:
794 return ((TARGET_BWX || (mode != HImode && mode != QImode))
795 && general_operand (op, mode));
796
797 case CONST_DOUBLE:
798 case CONST_VECTOR:
799 return op == CONST0_RTX (mode);
800
801 case CONST_INT:
802 return mode == QImode || mode == HImode || add_operand (op, mode);
803
804 case CONSTANT_P_RTX:
805 return 1;
806
807 default:
808 break;
809 }
810
811 return 0;
812 }
813
814 /* Return 1 if OP is a SYMBOL_REF for a function known to be in this
815 file, and in the same section as the current function. */
816
817 int
818 samegp_function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
819 {
820 if (GET_CODE (op) != SYMBOL_REF)
821 return false;
822
823 /* Easy test for recursion. */
824 if (op == XEXP (DECL_RTL (current_function_decl), 0))
825 return true;
826
827 /* Functions that are not local can be overridden, and thus may
828 not share the same gp. */
829 if (! SYMBOL_REF_LOCAL_P (op))
830 return false;
831
832 /* If -msmall-data is in effect, assume that there is only one GP
833 for the module, and so any local symbol has this property. We
834 need explicit relocations to be able to enforce this for symbols
835 not defined in this unit of translation, however. */
836 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
837 return true;
838
839 /* Functions that are not external are defined in this UoT,
840 and thus must share the same gp. */
841 return ! SYMBOL_REF_EXTERNAL_P (op);
842 }
843
844 /* Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. */
845
846 int
847 direct_call_operand (rtx op, enum machine_mode mode)
848 {
849 tree op_decl, cfun_sec, op_sec;
850
851 /* Must share the same GP. */
852 if (!samegp_function_operand (op, mode))
853 return false;
854
855 /* If profiling is implemented via linker tricks, we can't jump
856 to the nogp alternate entry point. Note that current_function_profile
857 would not be correct, since that doesn't indicate if the target
858 function uses profiling. */
859 /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test,
860 but is approximately correct for the OSF ABIs. Don't know
861 what to do for VMS, NT, or UMK. */
862 if (!TARGET_PROFILING_NEEDS_GP && profile_flag)
863 return false;
864
865 /* Must be a function. In some cases folks create thunks in static
866 data structures and then make calls to them. If we allow the
867 direct call, we'll get an error from the linker about !samegp reloc
868 against a symbol without a .prologue directive. */
869 if (!SYMBOL_REF_FUNCTION_P (op))
870 return false;
871
872 /* Must be "near" so that the branch is assumed to reach. With
873 -msmall-text, this is assumed true of all local symbols. Since
874 we've already checked samegp, locality is already assured. */
875 if (TARGET_SMALL_TEXT)
876 return true;
877
878 /* Otherwise, a decl is "near" if it is defined in the same section. */
879 if (flag_function_sections)
880 return false;
881
882 op_decl = SYMBOL_REF_DECL (op);
883 if (DECL_ONE_ONLY (current_function_decl)
884 || (op_decl && DECL_ONE_ONLY (op_decl)))
885 return false;
886
887 cfun_sec = DECL_SECTION_NAME (current_function_decl);
888 op_sec = op_decl ? DECL_SECTION_NAME (op_decl) : NULL;
889 return ((!cfun_sec && !op_sec)
890 || (cfun_sec && op_sec
891 && strcmp (TREE_STRING_POINTER (cfun_sec),
892 TREE_STRING_POINTER (op_sec)) == 0));
893 }
894
 895 /* Return true if OP is a LABEL_REF, or a SYMBOL_REF or CONST referencing
896 a (non-tls) variable known to be defined in this file. */
897
898 int
899 local_symbolic_operand (rtx op, enum machine_mode mode)
900 {
901 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
902 return 0;
903
904 if (GET_CODE (op) == LABEL_REF)
905 return 1;
906
907 if (GET_CODE (op) == CONST
908 && GET_CODE (XEXP (op, 0)) == PLUS
909 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
910 op = XEXP (XEXP (op, 0), 0);
911
912 if (GET_CODE (op) != SYMBOL_REF)
913 return 0;
914
915 return SYMBOL_REF_LOCAL_P (op) && !SYMBOL_REF_TLS_MODEL (op);
916 }
917
918 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
919 known to be defined in this file in the small data area. */
920
921 int
922 small_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
923 {
924 if (! TARGET_SMALL_DATA)
925 return 0;
926
927 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
928 return 0;
929
930 if (GET_CODE (op) == CONST
931 && GET_CODE (XEXP (op, 0)) == PLUS
932 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
933 op = XEXP (XEXP (op, 0), 0);
934
935 if (GET_CODE (op) != SYMBOL_REF)
936 return 0;
937
938 /* ??? There's no encode_section_info equivalent for the rtl
939 constant pool, so SYMBOL_FLAG_SMALL never gets set. */
940 if (CONSTANT_POOL_ADDRESS_P (op))
941 return GET_MODE_SIZE (get_pool_mode (op)) <= g_switch_value;
942
943 return (SYMBOL_REF_LOCAL_P (op)
944 && SYMBOL_REF_SMALL_P (op)
945 && SYMBOL_REF_TLS_MODEL (op) == 0);
946 }
947
948 /* Return true if OP is a SYMBOL_REF or CONST referencing a variable
949 not known (or known not) to be defined in this file. */
950
951 int
952 global_symbolic_operand (rtx op, enum machine_mode mode)
953 {
954 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
955 return 0;
956
957 if (GET_CODE (op) == CONST
958 && GET_CODE (XEXP (op, 0)) == PLUS
959 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
960 op = XEXP (XEXP (op, 0), 0);
961
962 if (GET_CODE (op) != SYMBOL_REF)
963 return 0;
964
965 return !SYMBOL_REF_LOCAL_P (op) && !SYMBOL_REF_TLS_MODEL (op);
966 }
967
968 /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */
969
970 int
971 call_operand (rtx op, enum machine_mode mode)
972 {
973 if (mode != Pmode)
974 return 0;
975
976 if (GET_CODE (op) == REG)
977 {
978 if (TARGET_ABI_OSF)
979 {
980 /* Disallow virtual registers to cope with pathological test cases
981 such as compile/930117-1.c in which the virtual reg decomposes
982 to the frame pointer. Which is a hard reg that is not $27. */
983 return (REGNO (op) == 27 || REGNO (op) > LAST_VIRTUAL_REGISTER);
984 }
985 else
986 return 1;
987 }
988 if (TARGET_ABI_UNICOSMK)
989 return 0;
990 if (GET_CODE (op) == SYMBOL_REF)
991 return 1;
992
993 return 0;
994 }
995
996 /* Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
997 possibly with an offset. */
998
999 int
1000 symbolic_operand (rtx op, enum machine_mode mode)
1001 {
1002 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1003 return 0;
1004 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
1005 return 1;
1006 if (GET_CODE (op) == CONST
1007 && GET_CODE (XEXP (op,0)) == PLUS
1008 && GET_CODE (XEXP (XEXP (op,0), 0)) == SYMBOL_REF
1009 && GET_CODE (XEXP (XEXP (op,0), 1)) == CONST_INT)
1010 return 1;
1011 return 0;
1012 }
1013
1014 /* Return true if OP is valid for a particular TLS relocation. */
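/* The shape being recognized is, schematically,
     (const (unspec [(symbol_ref "foo")] UNSPEC_DTPREL))
   or the equivalent form with UNSPEC_TPREL.  */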
1015
1016 static int
1017 tls_symbolic_operand_1 (rtx op, enum machine_mode mode, int size, int unspec)
1018 {
1019 if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
1020 return 0;
1021
1022 if (GET_CODE (op) != CONST)
1023 return 0;
1024 op = XEXP (op, 0);
1025
1026 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
1027 return 0;
1028 op = XVECEXP (op, 0, 0);
1029
1030 if (GET_CODE (op) != SYMBOL_REF)
1031 return 0;
1032
1033 if (SYMBOL_REF_LOCAL_P (op))
1034 {
1035 if (alpha_tls_size > size)
1036 return 0;
1037 }
1038 else
1039 {
1040 if (size != 64)
1041 return 0;
1042 }
1043
1044 switch (SYMBOL_REF_TLS_MODEL (op))
1045 {
1046 case TLS_MODEL_LOCAL_DYNAMIC:
1047 return unspec == UNSPEC_DTPREL;
1048 case TLS_MODEL_INITIAL_EXEC:
1049 return unspec == UNSPEC_TPREL && size == 64;
1050 case TLS_MODEL_LOCAL_EXEC:
1051 return unspec == UNSPEC_TPREL;
1052 default:
1053 abort ();
1054 }
1055 }
1056
1057 /* Return true if OP is valid for 16-bit DTP relative relocations. */
1058
1059 int
1060 dtp16_symbolic_operand (rtx op, enum machine_mode mode)
1061 {
1062 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_DTPREL);
1063 }
1064
1065 /* Return true if OP is valid for 32-bit DTP relative relocations. */
1066
1067 int
1068 dtp32_symbolic_operand (rtx op, enum machine_mode mode)
1069 {
1070 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_DTPREL);
1071 }
1072
1073 /* Return true if OP is valid for 64-bit DTP relative relocations. */
1074
1075 int
1076 gotdtp_symbolic_operand (rtx op, enum machine_mode mode)
1077 {
1078 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_DTPREL);
1079 }
1080
1081 /* Return true if OP is valid for 16-bit TP relative relocations. */
1082
1083 int
1084 tp16_symbolic_operand (rtx op, enum machine_mode mode)
1085 {
1086 return tls_symbolic_operand_1 (op, mode, 16, UNSPEC_TPREL);
1087 }
1088
1089 /* Return true if OP is valid for 32-bit TP relative relocations. */
1090
1091 int
1092 tp32_symbolic_operand (rtx op, enum machine_mode mode)
1093 {
1094 return tls_symbolic_operand_1 (op, mode, 32, UNSPEC_TPREL);
1095 }
1096
1097 /* Return true if OP is valid for 64-bit TP relative relocations. */
1098
1099 int
1100 gottp_symbolic_operand (rtx op, enum machine_mode mode)
1101 {
1102 return tls_symbolic_operand_1 (op, mode, 64, UNSPEC_TPREL);
1103 }
1104
1105 /* Return 1 if OP is a valid Alpha comparison operator. Here we know which
1106 comparisons are valid in which insn. */
1107
1108 int
1109 alpha_comparison_operator (rtx op, enum machine_mode mode)
1110 {
1111 enum rtx_code code = GET_CODE (op);
1112
1113 if (mode != GET_MODE (op) && mode != VOIDmode)
1114 return 0;
1115
1116 return (code == EQ || code == LE || code == LT
1117 || code == LEU || code == LTU);
1118 }
1119
1120 /* Return 1 if OP is a valid Alpha comparison operator against zero.
1121 Here we know which comparisons are valid in which insn. */
1122
1123 int
1124 alpha_zero_comparison_operator (rtx op, enum machine_mode mode)
1125 {
1126 enum rtx_code code = GET_CODE (op);
1127
1128 if (mode != GET_MODE (op) && mode != VOIDmode)
1129 return 0;
1130
1131 return (code == EQ || code == NE || code == LE || code == LT
1132 || code == LEU || code == LTU);
1133 }
1134
1135 /* Return 1 if OP is a valid Alpha swapped comparison operator. */
1136
1137 int
1138 alpha_swapped_comparison_operator (rtx op, enum machine_mode mode)
1139 {
1140 enum rtx_code code;
1141
1142 if ((mode != GET_MODE (op) && mode != VOIDmode)
1143 || !COMPARISON_P (op))
1144 return 0;
1145
1146 code = swap_condition (GET_CODE (op));
1147 return (code == EQ || code == LE || code == LT
1148 || code == LEU || code == LTU);
1149 }
1150
1151 /* Return 1 if OP is a signed comparison operation. */
1152
1153 int
1154 signed_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1155 {
1156 enum rtx_code code = GET_CODE (op);
1157
1158 if (mode != GET_MODE (op) && mode != VOIDmode)
1159 return 0;
1160
1161 return (code == EQ || code == NE
1162 || code == LE || code == LT
1163 || code == GE || code == GT);
1164 }
1165
1166 /* Return 1 if OP is a valid Alpha floating point comparison operator.
1167 Here we know which comparisons are valid in which insn. */
1168
1169 int
1170 alpha_fp_comparison_operator (rtx op, enum machine_mode mode)
1171 {
1172 enum rtx_code code = GET_CODE (op);
1173
1174 if (mode != GET_MODE (op) && mode != VOIDmode)
1175 return 0;
1176
1177 return (code == EQ || code == LE || code == LT || code == UNORDERED);
1178 }
1179
1180 /* Return 1 if this is a divide or modulus operator. */
1181
1182 int
1183 divmod_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1184 {
1185 enum rtx_code code = GET_CODE (op);
1186
1187 return (code == DIV || code == MOD || code == UDIV || code == UMOD);
1188 }
1189
1190 /* Return 1 if this is a float->int conversion operator. */
1191
1192 int
1193 fix_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1194 {
1195 enum rtx_code code = GET_CODE (op);
1196
1197 return (code == FIX || code == UNSIGNED_FIX);
1198 }
1199
1200 /* Return 1 if this memory address is a known aligned register plus
1201 a constant. It must be a valid address. This means that we can do
1202 this as an aligned reference plus some offset.
1203
1204 Take into account what reload will do. */
1205
1206 int
1207 aligned_memory_operand (rtx op, enum machine_mode mode)
1208 {
1209 rtx base;
1210
1211 if (reload_in_progress)
1212 {
1213 rtx tmp = op;
1214 if (GET_CODE (tmp) == SUBREG)
1215 tmp = SUBREG_REG (tmp);
1216 if (GET_CODE (tmp) == REG
1217 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1218 {
1219 op = reg_equiv_memory_loc[REGNO (tmp)];
1220 if (op == 0)
1221 return 0;
1222 }
1223 }
1224
1225 if (GET_CODE (op) != MEM)
1226 return 0;
1227 if (MEM_ALIGN (op) >= 32)
1228 return 1;
1229 op = XEXP (op, 0);
1230
1231 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1232 sorts of constructs. Dig for the real base register. */
1233 if (reload_in_progress
1234 && GET_CODE (op) == PLUS
1235 && GET_CODE (XEXP (op, 0)) == PLUS)
1236 base = XEXP (XEXP (op, 0), 0);
1237 else
1238 {
1239 if (! memory_address_p (mode, op))
1240 return 0;
1241 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1242 }
1243
1244 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) >= 32);
1245 }
1246
1247 /* Similar, but return 1 if OP is a MEM which is not alignable. */
1248
1249 int
1250 unaligned_memory_operand (rtx op, enum machine_mode mode)
1251 {
1252 rtx base;
1253
1254 if (reload_in_progress)
1255 {
1256 rtx tmp = op;
1257 if (GET_CODE (tmp) == SUBREG)
1258 tmp = SUBREG_REG (tmp);
1259 if (GET_CODE (tmp) == REG
1260 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1261 {
1262 op = reg_equiv_memory_loc[REGNO (tmp)];
1263 if (op == 0)
1264 return 0;
1265 }
1266 }
1267
1268 if (GET_CODE (op) != MEM)
1269 return 0;
1270 if (MEM_ALIGN (op) >= 32)
1271 return 0;
1272 op = XEXP (op, 0);
1273
1274 /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo)
1275 sorts of constructs. Dig for the real base register. */
1276 if (reload_in_progress
1277 && GET_CODE (op) == PLUS
1278 && GET_CODE (XEXP (op, 0)) == PLUS)
1279 base = XEXP (XEXP (op, 0), 0);
1280 else
1281 {
1282 if (! memory_address_p (mode, op))
1283 return 0;
1284 base = (GET_CODE (op) == PLUS ? XEXP (op, 0) : op);
1285 }
1286
1287 return (GET_CODE (base) == REG && REGNO_POINTER_ALIGN (REGNO (base)) < 32);
1288 }
1289
1290 /* Return 1 if OP is either a register or an unaligned memory location. */
1291
1292 int
1293 reg_or_unaligned_mem_operand (rtx op, enum machine_mode mode)
1294 {
1295 return register_operand (op, mode) || unaligned_memory_operand (op, mode);
1296 }
1297
1298 /* Return 1 if OP is any memory location. During reload a pseudo matches. */
1299
1300 int
1301 any_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1302 {
1303 return (GET_CODE (op) == MEM
1304 || (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
1305 || (reload_in_progress && GET_CODE (op) == REG
1306 && REGNO (op) >= FIRST_PSEUDO_REGISTER)
1307 || (reload_in_progress && GET_CODE (op) == SUBREG
1308 && GET_CODE (SUBREG_REG (op)) == REG
1309 && REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
1310 }
1311
1312 /* Returns 1 if OP is not an eliminable register.
1313
1314 This exists to cure a pathological abort in the s8addq (et al) patterns,
1315
1316 long foo () { long t; bar(); return (long) &t * 26107; }
1317
1318 which run afoul of a hack in reload to cure a (presumably) similar
1319 problem with lea-type instructions on other targets. But there is
1320 one of us and many of them, so work around the problem by selectively
1321 preventing combine from making the optimization. */
1322
1323 int
1324 reg_not_elim_operand (rtx op, enum machine_mode mode)
1325 {
1326 rtx inner = op;
1327 if (GET_CODE (op) == SUBREG)
1328 inner = SUBREG_REG (op);
1329 if (inner == frame_pointer_rtx || inner == arg_pointer_rtx)
1330 return 0;
1331
1332 return register_operand (op, mode);
1333 }
1334
1335 /* Return 1 if OP is a memory location that is not a reference (using
1336 an AND) to an unaligned location. Take into account what reload
1337 will do. */
1338
1339 int
1340 normal_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1341 {
1342 if (reload_in_progress)
1343 {
1344 rtx tmp = op;
1345 if (GET_CODE (tmp) == SUBREG)
1346 tmp = SUBREG_REG (tmp);
1347 if (GET_CODE (tmp) == REG
1348 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
1349 {
1350 op = reg_equiv_memory_loc[REGNO (tmp)];
1351
1352 /* This may not have been assigned an equivalent address if it will
1353 be eliminated. In that case, it doesn't matter what we do. */
1354 if (op == 0)
1355 return 1;
1356 }
1357 }
1358
1359 return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) != AND;
1360 }
1361
1362 /* Accept a register, but not a subreg of any kind. This allows us to
1363 avoid pathological cases in reload wrt data movement common in
1364 int->fp conversion. */
1365
1366 int
1367 reg_no_subreg_operand (rtx op, enum machine_mode mode)
1368 {
1369 if (GET_CODE (op) != REG)
1370 return 0;
1371 return register_operand (op, mode);
1372 }
1373
1374 /* Recognize an addition operation that includes a constant. Used to
1375 convince reload to canonicalize (plus (plus reg c1) c2) during register
1376 elimination. */
1377
1378 int
1379 addition_operation (rtx op, enum machine_mode mode)
1380 {
1381 if (GET_MODE (op) != mode && mode != VOIDmode)
1382 return 0;
1383 if (GET_CODE (op) == PLUS
1384 && register_operand (XEXP (op, 0), mode)
1385 && GET_CODE (XEXP (op, 1)) == CONST_INT
1386 && CONST_OK_FOR_LETTER_P (INTVAL (XEXP (op, 1)), 'K'))
1387 return 1;
1388 return 0;
1389 }
1390
1391 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
1392 the range defined for C in [I-P]. */
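/* A few concrete examples: 255 satisfies 'I', -32768 satisfies 'K', and
   0x7fff0000 satisfies 'L' (low 16 bits zero, sign-extended 32-bit
   value).  */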
1393
1394 bool
1395 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
1396 {
1397 switch (c)
1398 {
1399 case 'I':
1400 /* An unsigned 8 bit constant. */
1401 return (unsigned HOST_WIDE_INT) value < 0x100;
1402 case 'J':
1403 /* The constant zero. */
1404 return value == 0;
1405 case 'K':
1406 /* A signed 16 bit constant. */
1407 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
1408 case 'L':
1409 /* A shifted signed 16 bit constant appropriate for LDAH. */
1410 return ((value & 0xffff) == 0
1411 && ((value) >> 31 == -1 || value >> 31 == 0));
1412 case 'M':
1413 /* A constant that can be AND'ed with using a ZAP insn. */
1414 return zap_mask (value);
1415 case 'N':
1416 /* A complemented unsigned 8 bit constant. */
1417 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
1418 case 'O':
1419 /* A negated unsigned 8 bit constant. */
1420 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
1421 case 'P':
1422 /* The constant 1, 2 or 3. */
1423 return value == 1 || value == 2 || value == 3;
1424
1425 default:
1426 return false;
1427 }
1428 }
1429
1430 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
1431 matches for C in [GH]. */
1432
1433 bool
1434 alpha_const_double_ok_for_letter_p (rtx value, int c)
1435 {
1436 switch (c)
1437 {
1438 case 'G':
1439 /* The floating point zero constant. */
1440 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
1441 && value == CONST0_RTX (GET_MODE (value)));
1442
1443 case 'H':
1444 /* A valid operand of a ZAP insn. */
1445 return (GET_MODE (value) == VOIDmode
1446 && zap_mask (CONST_DOUBLE_LOW (value))
1447 && zap_mask (CONST_DOUBLE_HIGH (value)));
1448
1449 default:
1450 return false;
1451 }
1452 }
1453
1454 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
1455 matches for C. */
1456
1457 bool
1458 alpha_extra_constraint (rtx value, int c)
1459 {
1460 switch (c)
1461 {
1462 case 'Q':
1463 return normal_memory_operand (value, VOIDmode);
1464 case 'R':
1465 return direct_call_operand (value, Pmode);
1466 case 'S':
1467 return (GET_CODE (value) == CONST_INT
1468 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
1469 case 'T':
1470 return GET_CODE (value) == HIGH;
1471 case 'U':
1472 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
1473 case 'W':
1474 return (GET_CODE (value) == CONST_VECTOR
1475 && value == CONST0_RTX (GET_MODE (value)));
1476 default:
1477 return false;
1478 }
1479 }
1480
1481 /* Return 1 if this function can directly return via $26. */
1482
1483 int
1484 direct_return (void)
1485 {
1486 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
1487 && reload_completed
1488 && alpha_sa_size () == 0
1489 && get_frame_size () == 0
1490 && current_function_outgoing_args_size == 0
1491 && current_function_pretend_args_size == 0);
1492 }
1493
1494 /* Return the ADDR_VEC associated with a tablejump insn. */
1495
1496 rtx
1497 alpha_tablejump_addr_vec (rtx insn)
1498 {
1499 rtx tmp;
1500
1501 tmp = JUMP_LABEL (insn);
1502 if (!tmp)
1503 return NULL_RTX;
1504 tmp = NEXT_INSN (tmp);
1505 if (!tmp)
1506 return NULL_RTX;
1507 if (GET_CODE (tmp) == JUMP_INSN
1508 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
1509 return PATTERN (tmp);
1510 return NULL_RTX;
1511 }
1512
1513 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
1514
1515 rtx
1516 alpha_tablejump_best_label (rtx insn)
1517 {
1518 rtx jump_table = alpha_tablejump_addr_vec (insn);
1519 rtx best_label = NULL_RTX;
1520
1521 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
1522 there for edge frequency counts from profile data. */
1523
1524 if (jump_table)
1525 {
1526 int n_labels = XVECLEN (jump_table, 1);
1527 int best_count = -1;
1528 int i, j;
1529
1530 for (i = 0; i < n_labels; i++)
1531 {
1532 int count = 1;
1533
1534 for (j = i + 1; j < n_labels; j++)
1535 if (XEXP (XVECEXP (jump_table, 1, i), 0)
1536 == XEXP (XVECEXP (jump_table, 1, j), 0))
1537 count++;
1538
1539 if (count > best_count)
1540 best_count = count, best_label = XVECEXP (jump_table, 1, i);
1541 }
1542 }
1543
1544 return best_label ? best_label : const0_rtx;
1545 }
1546
1547 /* Return the TLS model to use for SYMBOL. */
1548
1549 static enum tls_model
1550 tls_symbolic_operand_type (rtx symbol)
1551 {
1552 enum tls_model model;
1553
1554 if (GET_CODE (symbol) != SYMBOL_REF)
1555 return 0;
1556 model = SYMBOL_REF_TLS_MODEL (symbol);
1557
1558 /* Local-exec with a 64-bit size is the same code as initial-exec. */
1559 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
1560 model = TLS_MODEL_INITIAL_EXEC;
1561
1562 return model;
1563 }
1564 \f
1565 /* Return true if the function DECL will share the same GP as any
1566 function in the current unit of translation. */
1567
1568 static bool
1569 decl_has_samegp (tree decl)
1570 {
1571 /* Functions that are not local can be overridden, and thus may
1572 not share the same gp. */
1573 if (!(*targetm.binds_local_p) (decl))
1574 return false;
1575
1576 /* If -msmall-data is in effect, assume that there is only one GP
1577 for the module, and so any local symbol has this property. We
1578 need explicit relocations to be able to enforce this for symbols
1579 not defined in this unit of translation, however. */
1580 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
1581 return true;
1582
1583 /* Functions that are not external are defined in this UoT. */
1584 /* ??? Irritatingly, static functions not yet emitted are still
1585 marked "external". Apply this to non-static functions only. */
1586 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
1587 }
1588
1589 /* Return true if EXP should be placed in the small data section. */
1590
1591 static bool
1592 alpha_in_small_data_p (tree exp)
1593 {
1594 /* We want to merge strings, so we never consider them small data. */
1595 if (TREE_CODE (exp) == STRING_CST)
1596 return false;
1597
1598 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
1599 {
1600 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
1601 if (strcmp (section, ".sdata") == 0
1602 || strcmp (section, ".sbss") == 0)
1603 return true;
1604 }
1605 else
1606 {
1607 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
1608
1609 /* If this is an incomplete type with size 0, then we can't put it
1610 in sdata because it might be too big when completed. */
1611 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
1612 return true;
1613 }
1614
1615 return false;
1616 }
1617
1618 #if TARGET_ABI_OPEN_VMS
1619 static bool
1620 alpha_linkage_symbol_p (const char *symname)
1621 {
1622 int symlen = strlen (symname);
1623
1624 if (symlen > 4)
1625 return strcmp (&symname [symlen - 4], "..lk") == 0;
1626
1627 return false;
1628 }
1629
1630 #define LINKAGE_SYMBOL_REF_P(X) \
1631 ((GET_CODE (X) == SYMBOL_REF \
1632 && alpha_linkage_symbol_p (XSTR (X, 0))) \
1633 || (GET_CODE (X) == CONST \
1634 && GET_CODE (XEXP (X, 0)) == PLUS \
1635 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
1636 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
1637 #endif
1638
1639 /* legitimate_address_p recognizes an RTL expression that is a valid
1640 memory address for an instruction. The MODE argument is the
1641 machine mode for the MEM expression that wants to use this address.
1642
1643 For Alpha, we have either a constant address or the sum of a
1644 register and a constant address, or just a register. For DImode,
1645 any of those forms can be surrounded with an AND that clears the
1646 low-order three bits; this is an "unaligned" access. */
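/* Schematically, accepted forms include, for example,
     (reg)                                            a base register
     (plus (reg) (const_int 64))                      register plus offset
     (and (plus (reg) (const_int 6)) (const_int -8))  ldq_u-style access
     (lo_sum (reg) (symbol_ref))                      with explicit relocs.  */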
1647
1648 bool
1649 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1650 {
1651 /* If this is an ldq_u type address, discard the outer AND. */
1652 if (mode == DImode
1653 && GET_CODE (x) == AND
1654 && GET_CODE (XEXP (x, 1)) == CONST_INT
1655 && INTVAL (XEXP (x, 1)) == -8)
1656 x = XEXP (x, 0);
1657
1658 /* Discard non-paradoxical subregs. */
1659 if (GET_CODE (x) == SUBREG
1660 && (GET_MODE_SIZE (GET_MODE (x))
1661 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1662 x = SUBREG_REG (x);
1663
1664 /* Unadorned general registers are valid. */
1665 if (REG_P (x)
1666 && (strict
1667 ? STRICT_REG_OK_FOR_BASE_P (x)
1668 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
1669 return true;
1670
1671 /* Constant addresses (i.e. +/- 32k) are valid. */
1672 if (CONSTANT_ADDRESS_P (x))
1673 return true;
1674
1675 #if TARGET_ABI_OPEN_VMS
1676 if (LINKAGE_SYMBOL_REF_P (x))
1677 return true;
1678 #endif
1679
1680 /* Register plus a small constant offset is valid. */
1681 if (GET_CODE (x) == PLUS)
1682 {
1683 rtx ofs = XEXP (x, 1);
1684 x = XEXP (x, 0);
1685
1686 /* Discard non-paradoxical subregs. */
1687 if (GET_CODE (x) == SUBREG
1688 && (GET_MODE_SIZE (GET_MODE (x))
1689 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1690 x = SUBREG_REG (x);
1691
1692 if (REG_P (x))
1693 {
1694 if (! strict
1695 && NONSTRICT_REG_OK_FP_BASE_P (x)
1696 && GET_CODE (ofs) == CONST_INT)
1697 return true;
1698 if ((strict
1699 ? STRICT_REG_OK_FOR_BASE_P (x)
1700 : NONSTRICT_REG_OK_FOR_BASE_P (x))
1701 && CONSTANT_ADDRESS_P (ofs))
1702 return true;
1703 }
1704 else if (GET_CODE (x) == ADDRESSOF
1705 && GET_CODE (ofs) == CONST_INT)
1706 return true;
1707 }
1708
1709 /* If we're managing explicit relocations, LO_SUM is valid, as
1710 are small data symbols. */
1711 else if (TARGET_EXPLICIT_RELOCS)
1712 {
1713 if (small_symbolic_operand (x, Pmode))
1714 return true;
1715
1716 if (GET_CODE (x) == LO_SUM)
1717 {
1718 rtx ofs = XEXP (x, 1);
1719 x = XEXP (x, 0);
1720
1721 /* Discard non-paradoxical subregs. */
1722 if (GET_CODE (x) == SUBREG
1723 && (GET_MODE_SIZE (GET_MODE (x))
1724 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1725 x = SUBREG_REG (x);
1726
1727 /* Must have a valid base register. */
1728 if (! (REG_P (x)
1729 && (strict
1730 ? STRICT_REG_OK_FOR_BASE_P (x)
1731 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
1732 return false;
1733
1734 /* The symbol must be local. */
1735 if (local_symbolic_operand (ofs, Pmode)
1736 || dtp32_symbolic_operand (ofs, Pmode)
1737 || tp32_symbolic_operand (ofs, Pmode))
1738 return true;
1739 }
1740 }
1741
1742 return false;
1743 }
1744
1745 /* Build the SYMBOL_REF for __tls_get_addr. */
1746
1747 static GTY(()) rtx tls_get_addr_libfunc;
1748
1749 static rtx
1750 get_tls_get_addr (void)
1751 {
1752 if (!tls_get_addr_libfunc)
1753 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
1754 return tls_get_addr_libfunc;
1755 }
1756
1757 /* Try machine-dependent ways of modifying an illegitimate address
1758 to be legitimate. If we find one, return the new, valid address. */
1759
1760 rtx
1761 alpha_legitimize_address (rtx x, rtx scratch,
1762 enum machine_mode mode ATTRIBUTE_UNUSED)
1763 {
1764 HOST_WIDE_INT addend;
1765
1766 /* If the address is (plus reg const_int) and the CONST_INT is not a
1767 valid offset, compute the high part of the constant and add it to
1768 the register. Then our address is (plus temp low-part-const). */
1769 if (GET_CODE (x) == PLUS
1770 && GET_CODE (XEXP (x, 0)) == REG
1771 && GET_CODE (XEXP (x, 1)) == CONST_INT
1772 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1773 {
1774 addend = INTVAL (XEXP (x, 1));
1775 x = XEXP (x, 0);
1776 goto split_addend;
1777 }
1778
1779 /* If the address is (const (plus FOO const_int)), find the low-order
1780 part of the CONST_INT. Then load FOO plus any high-order part of the
1781 CONST_INT into a register. Our address is (plus reg low-part-const).
1782 This is done to reduce the number of GOT entries. */
1783 if (!no_new_pseudos
1784 && GET_CODE (x) == CONST
1785 && GET_CODE (XEXP (x, 0)) == PLUS
1786 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1787 {
1788 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1789 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1790 goto split_addend;
1791 }
1792
1793 /* If we have a (plus reg const), emit the load as in (2), then add
1794 the two registers, and finally generate (plus reg low-part-const) as
1795 our address. */
1796 if (!no_new_pseudos
1797 && GET_CODE (x) == PLUS
1798 && GET_CODE (XEXP (x, 0)) == REG
1799 && GET_CODE (XEXP (x, 1)) == CONST
1800 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1801 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1802 {
1803 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1804 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1805 XEXP (XEXP (XEXP (x, 1), 0), 0),
1806 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1807 goto split_addend;
1808 }
1809
1810 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1811 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1812 {
1813 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1814
1815 switch (tls_symbolic_operand_type (x))
1816 {
1817 case TLS_MODEL_GLOBAL_DYNAMIC:
1818 start_sequence ();
1819
1820 r0 = gen_rtx_REG (Pmode, 0);
1821 r16 = gen_rtx_REG (Pmode, 16);
1822 tga = get_tls_get_addr ();
1823 dest = gen_reg_rtx (Pmode);
1824 seq = GEN_INT (alpha_next_sequence_number++);
1825
1826 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1827 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1828 insn = emit_call_insn (insn);
1829 CONST_OR_PURE_CALL_P (insn) = 1;
1830 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1831
1832 insn = get_insns ();
1833 end_sequence ();
1834
1835 emit_libcall_block (insn, dest, r0, x);
1836 return dest;
1837
1838 case TLS_MODEL_LOCAL_DYNAMIC:
1839 start_sequence ();
1840
1841 r0 = gen_rtx_REG (Pmode, 0);
1842 r16 = gen_rtx_REG (Pmode, 16);
1843 tga = get_tls_get_addr ();
1844 scratch = gen_reg_rtx (Pmode);
1845 seq = GEN_INT (alpha_next_sequence_number++);
1846
1847 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1848 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1849 insn = emit_call_insn (insn);
1850 CONST_OR_PURE_CALL_P (insn) = 1;
1851 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1852
1853 insn = get_insns ();
1854 end_sequence ();
1855
1856 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1857 UNSPEC_TLSLDM_CALL);
1858 emit_libcall_block (insn, scratch, r0, eqv);
1859
1860 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1861 eqv = gen_rtx_CONST (Pmode, eqv);
1862
1863 if (alpha_tls_size == 64)
1864 {
1865 dest = gen_reg_rtx (Pmode);
1866 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1867 emit_insn (gen_adddi3 (dest, dest, scratch));
1868 return dest;
1869 }
1870 if (alpha_tls_size == 32)
1871 {
1872 insn = gen_rtx_HIGH (Pmode, eqv);
1873 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1874 scratch = gen_reg_rtx (Pmode);
1875 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1876 }
1877 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1878
1879 case TLS_MODEL_INITIAL_EXEC:
1880 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1881 eqv = gen_rtx_CONST (Pmode, eqv);
1882 tp = gen_reg_rtx (Pmode);
1883 scratch = gen_reg_rtx (Pmode);
1884 dest = gen_reg_rtx (Pmode);
1885
1886 emit_insn (gen_load_tp (tp));
1887 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1888 emit_insn (gen_adddi3 (dest, tp, scratch));
1889 return dest;
1890
1891 case TLS_MODEL_LOCAL_EXEC:
1892 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1893 eqv = gen_rtx_CONST (Pmode, eqv);
1894 tp = gen_reg_rtx (Pmode);
1895
1896 emit_insn (gen_load_tp (tp));
1897 if (alpha_tls_size == 32)
1898 {
1899 insn = gen_rtx_HIGH (Pmode, eqv);
1900 insn = gen_rtx_PLUS (Pmode, tp, insn);
1901 tp = gen_reg_rtx (Pmode);
1902 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1903 }
1904 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1905 }
1906
1907 if (local_symbolic_operand (x, Pmode))
1908 {
1909 if (small_symbolic_operand (x, Pmode))
1910 return x;
1911 else
1912 {
1913 if (!no_new_pseudos)
1914 scratch = gen_reg_rtx (Pmode);
1915 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1916 gen_rtx_HIGH (Pmode, x)));
1917 return gen_rtx_LO_SUM (Pmode, scratch, x);
1918 }
1919 }
1920 }
1921
1922 return NULL;
1923
1924 split_addend:
1925 {
1926 HOST_WIDE_INT low, high;
1927
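    /* Split ADDEND into a sign-extended 16-bit low part and a remaining
       high part, so that each piece fits the signed 16-bit displacement
       used by LDA/LDAH-style address arithmetic.  For example, an addend
       of 0x2345e000 splits into low = -0x2000 and high = 0x23460000, and
       (x + 0x23460000) + (-0x2000) == x + 0x2345e000.  */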
1928 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1929 addend -= low;
1930 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1931 addend -= high;
1932
1933 if (addend)
1934 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1935 (no_new_pseudos ? scratch : NULL_RTX),
1936 1, OPTAB_LIB_WIDEN);
1937 if (high)
1938 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1939 (no_new_pseudos ? scratch : NULL_RTX),
1940 1, OPTAB_LIB_WIDEN);
1941
1942 return plus_constant (x, low);
1943 }
1944 }
1945
1946 /* We do not allow indirect calls to be optimized into sibling calls, nor
1947 can we allow a call to a function with a different GP to be optimized
1948 into a sibcall. */
1949
1950 static bool
1951 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1952 {
1953 /* Can't do indirect tail calls, since we don't know if the target
1954 uses the same GP. */
1955 if (!decl)
1956 return false;
1957
1958 /* Otherwise, we can make a tail call if the target function shares
1959 the same GP. */
1960 return decl_has_samegp (decl);
1961 }
1962
1963 /* For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a
1964 small symbolic operand until after reload, at which point we need
1965 to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref))
1966 so that sched2 has the proper dependency information. */
1967
1968 static int
1969 some_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1970 {
1971 rtx x = *px;
1972
1973 /* Don't re-split. */
1974 if (GET_CODE (x) == LO_SUM)
1975 return -1;
1976
1977 return small_symbolic_operand (x, Pmode) != 0;
1978 }
1979
1980 int
1981 some_small_symbolic_operand (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1982 {
1983 return for_each_rtx (&x, some_small_symbolic_operand_1, NULL);
1984 }
1985
1986 static int
1987 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1988 {
1989 rtx x = *px;
1990
1991 /* Don't re-split. */
1992 if (GET_CODE (x) == LO_SUM)
1993 return -1;
1994
1995 if (small_symbolic_operand (x, Pmode))
1996 {
1997 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1998 *px = x;
1999 return -1;
2000 }
2001
2002 return 0;
2003 }
2004
2005 rtx
2006 split_small_symbolic_operand (rtx x)
2007 {
2008 x = copy_insn (x);
2009 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
2010 return x;
2011 }
2012
2013 /* Indicate that INSN cannot be duplicated. This is true for any insn
2014 that we've marked with gpdisp relocs, since those have to stay in
2015 1-1 correspondence with one another.
2016
2017 Technically we could copy them if we could set up a mapping from one
2018 sequence number to another, across the set of insns to be duplicated.
2019 This seems overly complicated and error-prone since interblock motion
2020 from sched-ebb could move one of the pair of insns to a different block.
2021
2022 We also cannot allow jsr insns to be duplicated. If they throw exceptions,
2023 then they'll be in a different block from their ldgp, which could lead
2024 the bb reorder code to think that it would be ok to copy just the block
2025 containing the call and branch to the block containing the ldgp. */
2026
2027 static bool
2028 alpha_cannot_copy_insn_p (rtx insn)
2029 {
2030 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
2031 return false;
2032 if (recog_memoized (insn) >= 0)
2033 return get_attr_cannot_copy (insn);
2034 else
2035 return false;
2036 }
2037
2038
2039 /* Try a machine-dependent way of reloading an illegitimate address
2040 operand. If we find one, push the reload and return the new rtx. */
2041
2042 rtx
2043 alpha_legitimize_reload_address (rtx x,
2044 enum machine_mode mode ATTRIBUTE_UNUSED,
2045 int opnum, int type,
2046 int ind_levels ATTRIBUTE_UNUSED)
2047 {
2048 /* We must recognize output that we have already generated ourselves. */
2049 if (GET_CODE (x) == PLUS
2050 && GET_CODE (XEXP (x, 0)) == PLUS
2051 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2052 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2053 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2054 {
2055 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2056 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2057 opnum, type);
2058 return x;
2059 }
2060
2061 /* We wish to handle large displacements off a base register by
2062 splitting the addend across an ldah and the mem insn. This
2063 cuts the number of extra insns needed from 3 to 1. */
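/* For example, a displacement of 0x12340 splits into HIGH = 0x10000 (folded
   into one ldah on the base register) and LOW = 0x2340, which fits in the
   memory insn's 16-bit displacement field.  */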
2064 if (GET_CODE (x) == PLUS
2065 && GET_CODE (XEXP (x, 0)) == REG
2066 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
2067 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
2068 && GET_CODE (XEXP (x, 1)) == CONST_INT)
2069 {
2070 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
2071 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
2072 HOST_WIDE_INT high
2073 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
2074
2075 /* Check for 32-bit overflow. */
2076 if (high + low != val)
2077 return NULL_RTX;
2078
2079 /* Reload the high part into a base reg; leave the low part
2080 in the mem directly. */
2081 x = gen_rtx_PLUS (GET_MODE (x),
2082 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
2083 GEN_INT (high)),
2084 GEN_INT (low));
2085
2086 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2087 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
2088 opnum, type);
2089 return x;
2090 }
2091
2092 return NULL_RTX;
2093 }
2094 \f
2095 /* Compute a (partial) cost for rtx X. Return true if the complete
2096 cost has been computed, and false if subexpressions should be
2097 scanned. In either case, *TOTAL contains the cost result. */
2098
2099 static bool
2100 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
2101 {
2102 enum machine_mode mode = GET_MODE (x);
2103 bool float_mode_p = FLOAT_MODE_P (mode);
2104
2105 switch (code)
2106 {
2107 /* If this is an 8-bit constant, return zero since it can be used
2108 nearly anywhere with no cost. If it is a valid operand for an
2109 ADD or AND, likewise return 0 if we know it will be used in that
2110 context. Otherwise, return 2 since it might be used there later.
2111 All other constants take at least two insns. */
2112 case CONST_INT:
2113 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
2114 {
2115 *total = 0;
2116 return true;
2117 }
2118 /* FALLTHRU */
2119
2120 case CONST_DOUBLE:
2121 if (x == CONST0_RTX (mode))
2122 *total = 0;
2123 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
2124 || (outer_code == AND && and_operand (x, VOIDmode)))
2125 *total = 0;
2126 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
2127 *total = 2;
2128 else
2129 *total = COSTS_N_INSNS (2);
2130 return true;
2131
2132 case CONST:
2133 case SYMBOL_REF:
2134 case LABEL_REF:
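/* A small symbol is free when it appears directly inside a MEM, since
   the lo_sum folds into the load or store; otherwise it costs one lda
   (plus an ldah in the merely-local case).  */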
2135 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
2136 *total = COSTS_N_INSNS (outer_code != MEM);
2137 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
2138 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
2139 else if (tls_symbolic_operand_type (x))
2140 /* Estimate of cost for call_pal rduniq. */
2141 *total = COSTS_N_INSNS (15);
2142 else
2143 /* Otherwise we do a load from the GOT. */
2144 *total = COSTS_N_INSNS (alpha_memory_latency);
2145 return true;
2146
2147 case PLUS:
2148 case MINUS:
2149 if (float_mode_p)
2150 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2151 else if (GET_CODE (XEXP (x, 0)) == MULT
2152 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
2153 {
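/* A multiply by 4 or 8 feeding the add maps onto the scaled-add
   (s4addq/s8addq) patterns, so charge only the two operands plus a
   small constant (2).  */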
2154 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
2155 + rtx_cost (XEXP (x, 1), outer_code) + 2);
2156 return true;
2157 }
2158 return false;
2159
2160 case MULT:
2161 if (float_mode_p)
2162 *total = alpha_rtx_cost_data[alpha_cpu].fp_mult;
2163 else if (mode == DImode)
2164 *total = alpha_rtx_cost_data[alpha_cpu].int_mult_di;
2165 else
2166 *total = alpha_rtx_cost_data[alpha_cpu].int_mult_si;
2167 return false;
2168
2169 case ASHIFT:
2170 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2171 && INTVAL (XEXP (x, 1)) <= 3)
2172 {
2173 *total = COSTS_N_INSNS (1);
2174 return false;
2175 }
2176 /* FALLTHRU */
2177
2178 case ASHIFTRT:
2179 case LSHIFTRT:
2180 *total = alpha_rtx_cost_data[alpha_cpu].int_shift;
2181 return false;
2182
2183 case IF_THEN_ELSE:
2184 if (float_mode_p)
2185 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2186 else
2187 *total = alpha_rtx_cost_data[alpha_cpu].int_cmov;
2188 return false;
2189
2190 case DIV:
2191 case UDIV:
2192 case MOD:
2193 case UMOD:
2194 if (!float_mode_p)
2195 *total = COSTS_N_INSNS (70); /* ??? */
2196 else if (mode == SFmode)
2197 *total = alpha_rtx_cost_data[alpha_cpu].fp_div_sf;
2198 else
2199 *total = alpha_rtx_cost_data[alpha_cpu].fp_div_df;
2200 return false;
2201
2202 case MEM:
2203 *total = COSTS_N_INSNS (alpha_memory_latency);
2204 return true;
2205
2206 case NEG:
2207 if (! float_mode_p)
2208 {
2209 *total = COSTS_N_INSNS (1);
2210 return false;
2211 }
2212 /* FALLTHRU */
2213
2214 case ABS:
2215 if (! float_mode_p)
2216 {
2217 *total = COSTS_N_INSNS (1) + alpha_rtx_cost_data[alpha_cpu].int_cmov;
2218 return false;
2219 }
2220 /* FALLTHRU */
2221
2222 case FLOAT:
2223 case UNSIGNED_FLOAT:
2224 case FIX:
2225 case UNSIGNED_FIX:
2226 case FLOAT_EXTEND:
2227 case FLOAT_TRUNCATE:
2228 *total = alpha_rtx_cost_data[alpha_cpu].fp_add;
2229 return false;
2230
2231 default:
2232 return false;
2233 }
2234 }
2235 \f
2236 /* REF is an alignable memory location. Place an aligned SImode
2237 reference into *PALIGNED_MEM and the number of bits to shift into
2238 *PBITNUM. SCRATCH is a free register for use in reloading out
2239 of range stack slots. */
2240
2241 void
2242 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
2243 {
2244 rtx base;
2245 HOST_WIDE_INT offset = 0;
2246
2247 if (GET_CODE (ref) != MEM)
2248 abort ();
2249
2250 if (reload_in_progress
2251 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2252 {
2253 base = find_replacement (&XEXP (ref, 0));
2254
2255 if (! memory_address_p (GET_MODE (ref), base))
2256 abort ();
2257 }
2258 else
2259 {
2260 base = XEXP (ref, 0);
2261 }
2262
2263 if (GET_CODE (base) == PLUS)
2264 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2265
2266 *paligned_mem
2267 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
2268
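/* The SImode access covers the longword at OFFSET & ~3; *PBITNUM is the
   bit position of the original datum within that longword.  E.g. an HImode
   reference at byte offset 6 becomes an SImode access at offset 4 with a
   bit number of 16 on a little-endian target.  */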
2269 if (WORDS_BIG_ENDIAN)
2270 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
2271 + (offset & 3) * 8));
2272 else
2273 *pbitnum = GEN_INT ((offset & 3) * 8);
2274 }
2275
2276 /* Similar, but just get the address. Handle the two reload cases.
2277 Add EXTRA_OFFSET to the address we return. */
2278
2279 rtx
2280 get_unaligned_address (rtx ref, int extra_offset)
2281 {
2282 rtx base;
2283 HOST_WIDE_INT offset = 0;
2284
2285 if (GET_CODE (ref) != MEM)
2286 abort ();
2287
2288 if (reload_in_progress
2289 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
2290 {
2291 base = find_replacement (&XEXP (ref, 0));
2292
2293 if (! memory_address_p (GET_MODE (ref), base))
2294 abort ();
2295 }
2296 else
2297 {
2298 base = XEXP (ref, 0);
2299 }
2300
2301 if (GET_CODE (base) == PLUS)
2302 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
2303
2304 return plus_constant (base, offset + extra_offset);
2305 }
2306
2307 /* On the Alpha, all (non-symbolic) constants except zero go into
2308 a floating-point register via memory. Note that we cannot
2309 return anything that is not a subset of CLASS, and that some
2310 symbolic constants cannot be dropped to memory. */
2311
2312 enum reg_class
2313 alpha_preferred_reload_class (rtx x, enum reg_class class)
2314 {
2315 /* Zero is present in any register class. */
2316 if (x == CONST0_RTX (GET_MODE (x)))
2317 return class;
2318
2319 /* These sorts of constants we can easily drop to memory. */
2320 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
2321 {
2322 if (class == FLOAT_REGS)
2323 return NO_REGS;
2324 if (class == ALL_REGS)
2325 return GENERAL_REGS;
2326 return class;
2327 }
2328
2329 /* All other kinds of constants should not (and in the case of HIGH
2330 cannot) be dropped to memory -- instead we use a GENERAL_REGS
2331 secondary reload. */
2332 if (CONSTANT_P (x))
2333 return (class == ALL_REGS ? GENERAL_REGS : class);
2334
2335 return class;
2336 }
2337
2338 /* Loading and storing HImode or QImode values to and from memory
2339 usually requires a scratch register unless byte instructions are
2340 permitted. When they are not, the one exception is loading QImode
2341 or HImode from an aligned address into a general register.
2342
2343 We also cannot load an unaligned address or a paradoxical SUBREG
2344 into an FP register.
2345
2346 We also cannot do integral arithmetic into FP regs, as might result
2347 from register elimination into a DImode fp register. */
2348
2349 enum reg_class
2350 secondary_reload_class (enum reg_class class, enum machine_mode mode,
2351 rtx x, int in)
2352 {
2353 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
2354 {
2355 if (GET_CODE (x) == MEM
2356 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
2357 || (GET_CODE (x) == SUBREG
2358 && (GET_CODE (SUBREG_REG (x)) == MEM
2359 || (GET_CODE (SUBREG_REG (x)) == REG
2360 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
2361 {
2362 if (!in || !aligned_memory_operand(x, mode))
2363 return GENERAL_REGS;
2364 }
2365 }
2366
2367 if (class == FLOAT_REGS)
2368 {
2369 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
2370 return GENERAL_REGS;
2371
2372 if (GET_CODE (x) == SUBREG
2373 && (GET_MODE_SIZE (GET_MODE (x))
2374 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2375 return GENERAL_REGS;
2376
2377 if (in && INTEGRAL_MODE_P (mode)
2378 && ! (memory_operand (x, mode) || x == const0_rtx))
2379 return GENERAL_REGS;
2380 }
2381
2382 return NO_REGS;
2383 }
2384 \f
2385 /* Subfunction of the following function. Update the flags of any MEM
2386 found in part of X. */
2387
2388 static void
2389 alpha_set_memflags_1 (rtx x, int in_struct_p, int volatile_p, int unchanging_p)
2390 {
2391 int i;
2392
2393 switch (GET_CODE (x))
2394 {
2395 case SEQUENCE:
2396 abort ();
2397
2398 case PARALLEL:
2399 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
2400 alpha_set_memflags_1 (XVECEXP (x, 0, i), in_struct_p, volatile_p,
2401 unchanging_p);
2402 break;
2403
2404 case INSN:
2405 alpha_set_memflags_1 (PATTERN (x), in_struct_p, volatile_p,
2406 unchanging_p);
2407 break;
2408
2409 case SET:
2410 alpha_set_memflags_1 (SET_DEST (x), in_struct_p, volatile_p,
2411 unchanging_p);
2412 alpha_set_memflags_1 (SET_SRC (x), in_struct_p, volatile_p,
2413 unchanging_p);
2414 break;
2415
2416 case MEM:
2417 MEM_IN_STRUCT_P (x) = in_struct_p;
2418 MEM_VOLATILE_P (x) = volatile_p;
2419 RTX_UNCHANGING_P (x) = unchanging_p;
2420 /* Sadly, we cannot use alias sets because the extra aliasing
2421 produced by the AND interferes. Given that two-byte quantities
2422 are the only thing we would be able to differentiate anyway,
2423 there does not seem to be any point in convoluting the early
2424 out of the alias check. */
2425 break;
2426
2427 default:
2428 break;
2429 }
2430 }
2431
2432 /* Given INSN, which is an INSN list or the PATTERN of a single insn
2433 generated to perform a memory operation, look for any MEMs in either
2434 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
2435 volatile flags from REF into each of the MEMs found. If REF is not
2436 a MEM, don't do anything. */
2437
2438 void
2439 alpha_set_memflags (rtx insn, rtx ref)
2440 {
2441 int in_struct_p, volatile_p, unchanging_p;
2442
2443 if (GET_CODE (ref) != MEM)
2444 return;
2445
2446 in_struct_p = MEM_IN_STRUCT_P (ref);
2447 volatile_p = MEM_VOLATILE_P (ref);
2448 unchanging_p = RTX_UNCHANGING_P (ref);
2449
2450 /* This is only called from alpha.md, after having had something
2451 generated from one of the insn patterns. So if everything is
2452 zero, the pattern is already up-to-date. */
2453 if (! in_struct_p && ! volatile_p && ! unchanging_p)
2454 return;
2455
2456 alpha_set_memflags_1 (insn, in_struct_p, volatile_p, unchanging_p);
2457 }
2458 \f
2459 /* Internal routine for alpha_emit_set_const to check for N or below insns. */
2460
2461 static rtx
2462 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
2463 HOST_WIDE_INT c, int n)
2464 {
2465 HOST_WIDE_INT new;
2466 int i, bits;
2467 /* Use a pseudo if highly optimizing and still generating RTL. */
2468 rtx subtarget
2469 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
2470 rtx temp, insn;
2471
2472 /* If this is a sign-extended 32-bit constant, we can do this in at most
2473 three insns, so do it if we have enough insns left. We always have
2474 a sign-extended 32-bit constant when compiling on a narrow machine. */
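/* For example, C = 0x23458001 gives LOW = -0x7fff and HIGH = 0x2346, so the
   value is built with an ldah of 0x2346 followed by an lda of -0x7fff.  When
   HIGH would come out with bit 15 set for a positive constant, EXTRA adds a
   second ldah of 0x4000 to compensate.  */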
2475
2476 if (HOST_BITS_PER_WIDE_INT != 64
2477 || c >> 31 == -1 || c >> 31 == 0)
2478 {
2479 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
2480 HOST_WIDE_INT tmp1 = c - low;
2481 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
2482 HOST_WIDE_INT extra = 0;
2483
2484 /* If HIGH will be interpreted as negative but the constant is
2485 positive, we must adjust it to do two ldah insns. */
2486
2487 if ((high & 0x8000) != 0 && c >= 0)
2488 {
2489 extra = 0x4000;
2490 tmp1 -= 0x40000000;
2491 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
2492 }
2493
2494 if (c == low || (low == 0 && extra == 0))
2495 {
2496 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
2497 but that meant that we can't handle INT_MIN on 32-bit machines
2498 (like NT/Alpha), because we recurse indefinitely through
2499 emit_move_insn to gen_movdi. So instead, since we know exactly
2500 what we want, create it explicitly. */
2501
2502 if (target == NULL)
2503 target = gen_reg_rtx (mode);
2504 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
2505 return target;
2506 }
2507 else if (n >= 2 + (extra != 0))
2508 {
2509 if (no_new_pseudos)
2510 {
2511 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
2512 temp = target;
2513 }
2514 else
2515 temp = copy_to_suggested_reg (GEN_INT (high << 16),
2516 subtarget, mode);
2517
2518 /* As of 2002-02-23, addsi3 is only available when not optimizing.
2519 This means that if we go through expand_binop, we'll try to
2520 generate extensions, etc, which will require new pseudos, which
2521 will fail during some split phases. The SImode add patterns
2522 still exist, but are not named. So build the insns by hand. */
2523
2524 if (extra != 0)
2525 {
2526 if (! subtarget)
2527 subtarget = gen_reg_rtx (mode);
2528 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
2529 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
2530 emit_insn (insn);
2531 temp = subtarget;
2532 }
2533
2534 if (target == NULL)
2535 target = gen_reg_rtx (mode);
2536 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2537 insn = gen_rtx_SET (VOIDmode, target, insn);
2538 emit_insn (insn);
2539 return target;
2540 }
2541 }
2542
2543 /* If we couldn't do it that way, try some other methods. But if we have
2544 no instructions left, don't bother. Likewise, if this is SImode and
2545 we can't make pseudos, we can't do anything since the expand_binop
2546 and expand_unop calls will widen and try to make pseudos. */
2547
2548 if (n == 1 || (mode == SImode && no_new_pseudos))
2549 return 0;
2550
2551 /* Next, see if we can load a related constant and then shift and possibly
2552 negate it to get the constant we want. Try this once each increasing
2553 numbers of insns. */
2554
2555 for (i = 1; i < n; i++)
2556 {
2557 /* First, see if, minus some low bits, we have an easy load of the
2558 high bits. */
2559
2560 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
2561 if (new != 0
2562 && (temp = alpha_emit_set_const (subtarget, mode, c - new, i)) != 0)
2563 return expand_binop (mode, add_optab, temp, GEN_INT (new),
2564 target, 0, OPTAB_WIDEN);
2565
2566 /* Next try complementing. */
2567 if ((temp = alpha_emit_set_const (subtarget, mode, ~ c, i)) != 0)
2568 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
2569
2570 /* Next try to form a constant and do a left shift. We can do this
2571 if some low-order bits are zero; the exact_log2 call below tells
2572 us that information. The bits we are shifting out could be any
2573 value, but here we'll just try the 0- and sign-extended forms of
2574 the constant. To try to increase the chance of having the same
2575 constant in more than one insn, start at the highest number of
2576 bits to shift, but try all possibilities in case a ZAPNOT will
2577 be useful. */
2578
2579 if ((bits = exact_log2 (c & - c)) > 0)
2580 for (; bits > 0; bits--)
2581 if ((temp = (alpha_emit_set_const
2582 (subtarget, mode, c >> bits, i))) != 0
2583 || ((temp = (alpha_emit_set_const
2584 (subtarget, mode,
2585 ((unsigned HOST_WIDE_INT) c) >> bits, i)))
2586 != 0))
2587 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
2588 target, 0, OPTAB_WIDEN);
2589
2590 /* Now try high-order zero bits. Here we try the shifted-in bits as
2591 all zero and all ones. Be careful to avoid shifting outside the
2592 mode and to avoid shifting outside the host wide int size. */
2593 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
2594 confuse the recursive call and set all of the high 32 bits. */
2595
2596 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2597 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
2598 for (; bits > 0; bits--)
2599 if ((temp = alpha_emit_set_const (subtarget, mode,
2600 c << bits, i)) != 0
2601 || ((temp = (alpha_emit_set_const
2602 (subtarget, mode,
2603 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2604 i)))
2605 != 0))
2606 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
2607 target, 1, OPTAB_WIDEN);
2608
2609 /* Now try high-order 1 bits. We get that with a sign-extension.
2610 But one bit isn't enough here. Be careful to avoid shifting outside
2611 the mode and to avoid shifting outside the host wide int size. */
2612
2613 if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
2614 - floor_log2 (~ c) - 2)) > 0)
2615 for (; bits > 0; bits--)
2616 if ((temp = alpha_emit_set_const (subtarget, mode,
2617 c << bits, i)) != 0
2618 || ((temp = (alpha_emit_set_const
2619 (subtarget, mode,
2620 ((c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1)),
2621 i)))
2622 != 0))
2623 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
2624 target, 0, OPTAB_WIDEN);
2625 }
2626
2627 #if HOST_BITS_PER_WIDE_INT == 64
2628 /* Finally, see if we can load a value into the target that is the same as the
2629 constant except that all bytes that are 0 are changed to be 0xff. If we
2630 can, then we can do a ZAPNOT to obtain the desired constant. */
2631
2632 new = c;
2633 for (i = 0; i < 64; i += 8)
2634 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
2635 new |= (HOST_WIDE_INT) 0xff << i;
2636
2637 /* We are only called for SImode and DImode. If this is SImode, ensure that
2638 we are sign extended to a full word. */
2639
2640 if (mode == SImode)
2641 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
2642
2643 if (new != c && new != -1
2644 && (temp = alpha_emit_set_const (subtarget, mode, new, n - 1)) != 0)
2645 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
2646 target, 0, OPTAB_WIDEN);
2647 #endif
2648
2649 return 0;
2650 }
2651
2652 /* Try to output insns to set TARGET equal to the constant C if it can be
2653 done in at most N insns. Do all computations in MODE. Returns the place
2654 where the output has been placed if it can be done and the insns have been
2655 emitted. If it would take more than N insns, zero is returned and no
2656 insns are emitted. */
2657
2658 rtx
2659 alpha_emit_set_const (rtx target, enum machine_mode mode,
2660 HOST_WIDE_INT c, int n)
2661 {
2662 rtx result = 0;
2663 rtx orig_target = target;
2664 int i;
2665
2666 /* If we can't make any pseudos, TARGET is an SImode hard register, we
2667 can't load this constant in one insn, do this in DImode. */
2668 if (no_new_pseudos && mode == SImode
2669 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER
2670 && (result = alpha_emit_set_const_1 (target, mode, c, 1)) == 0)
2671 {
2672 target = gen_lowpart (DImode, target);
2673 mode = DImode;
2674 }
2675
2676 /* Try 1 insn, then 2, then up to N. */
2677 for (i = 1; i <= n; i++)
2678 {
2679 result = alpha_emit_set_const_1 (target, mode, c, i);
2680 if (result)
2681 {
2682 rtx insn = get_last_insn ();
2683 rtx set = single_set (insn);
2684 if (! CONSTANT_P (SET_SRC (set)))
2685 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
2686 break;
2687 }
2688 }
2689
2690 /* Allow for the case where we changed the mode of TARGET. */
2691 if (result == target)
2692 result = orig_target;
2693
2694 return result;
2695 }
2696
2697 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2698 fall back to a straightforward decomposition. We do this to avoid
2699 exponential run times encountered when looking for longer sequences
2700 with alpha_emit_set_const. */
2701
2702 rtx
2703 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2704 {
2705 HOST_WIDE_INT d1, d2, d3, d4;
2706
2707 /* Decompose the entire word */
2708 #if HOST_BITS_PER_WIDE_INT >= 64
2709 if (c2 != -(c1 < 0))
2710 abort ();
2711 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2712 c1 -= d1;
2713 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2714 c1 = (c1 - d2) >> 32;
2715 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2716 c1 -= d3;
2717 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2718 if (c1 != d4)
2719 abort ();
2720 #else
2721 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2722 c1 -= d1;
2723 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2724 if (c1 != d2)
2725 abort ();
2726 c2 += (d2 < 0);
2727 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2728 c2 -= d3;
2729 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2730 if (c2 != d4)
2731 abort ();
2732 #endif
2733
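/* The pieces satisfy C == ((D4 + D3) << 32) + D2 + D1, where D1 and D3 are
   sign-extended 16-bit values and D2 and D4 are sign-extended 32-bit values
   with zero low 16 bits, so each step below is a single lda, ldah, shift,
   or add.  */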
2734 /* Construct the high word */
2735 if (d4)
2736 {
2737 emit_move_insn (target, GEN_INT (d4));
2738 if (d3)
2739 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2740 }
2741 else
2742 emit_move_insn (target, GEN_INT (d3));
2743
2744 /* Shift it into place */
2745 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2746
2747 /* Add in the low bits. */
2748 if (d2)
2749 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2750 if (d1)
2751 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2752
2753 return target;
2754 }
2755
2756 /* Expand a move instruction; return true if all work is done.
2757 We don't handle non-bwx subword loads here. */
2758
2759 bool
2760 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2761 {
2762 /* If the output is not a register, the input must be. */
2763 if (GET_CODE (operands[0]) == MEM
2764 && ! reg_or_0_operand (operands[1], mode))
2765 operands[1] = force_reg (mode, operands[1]);
2766
2767 /* Allow legitimize_address to perform some simplifications. */
2768 if (mode == Pmode && symbolic_operand (operands[1], mode))
2769 {
2770 rtx tmp;
2771
2772 /* With RTL inlining, at -O3, rtl is generated, stored, then actually
2773 compiled at the end of compilation. In the meantime, someone can
2774 re-encode-section-info on some symbol changing it e.g. from global
2775 to local-not-small. If this happens, we'd have emitted a plain
2776 load rather than a high+losum load and not recognize the insn.
2777
2778 So if rtl inlining is in effect, we delay the global/not-global
2779 decision until rest_of_compilation by wrapping it in an
2780 UNSPEC_SYMBOL. */
2781 if (TARGET_EXPLICIT_RELOCS && flag_inline_functions
2782 && rtx_equal_function_value_matters
2783 && global_symbolic_operand (operands[1], mode))
2784 {
2785 emit_insn (gen_movdi_er_maybe_g (operands[0], operands[1]));
2786 return true;
2787 }
2788
2789 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2790 if (tmp)
2791 {
2792 if (tmp == operands[0])
2793 return true;
2794 operands[1] = tmp;
2795 return false;
2796 }
2797 }
2798
2799 /* Early out for non-constants and valid constants. */
2800 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2801 return false;
2802
2803 /* Split large integers. */
2804 if (GET_CODE (operands[1]) == CONST_INT
2805 || GET_CODE (operands[1]) == CONST_DOUBLE)
2806 {
2807 HOST_WIDE_INT i0, i1;
2808 rtx temp = NULL_RTX;
2809
2810 if (GET_CODE (operands[1]) == CONST_INT)
2811 {
2812 i0 = INTVAL (operands[1]);
2813 i1 = -(i0 < 0);
2814 }
2815 else if (HOST_BITS_PER_WIDE_INT >= 64)
2816 {
2817 i0 = CONST_DOUBLE_LOW (operands[1]);
2818 i1 = -(i0 < 0);
2819 }
2820 else
2821 {
2822 i0 = CONST_DOUBLE_LOW (operands[1]);
2823 i1 = CONST_DOUBLE_HIGH (operands[1]);
2824 }
2825
2826 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2827 temp = alpha_emit_set_const (operands[0], mode, i0, 3);
2828
2829 if (!temp && TARGET_BUILD_CONSTANTS)
2830 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2831
2832 if (temp)
2833 {
2834 if (rtx_equal_p (operands[0], temp))
2835 return true;
2836 operands[1] = temp;
2837 return false;
2838 }
2839 }
2840
2841 /* Otherwise we've nothing left but to drop the thing to memory. */
2842 operands[1] = force_const_mem (mode, operands[1]);
2843 if (reload_in_progress)
2844 {
2845 emit_move_insn (operands[0], XEXP (operands[1], 0));
2846 operands[1] = copy_rtx (operands[1]);
2847 XEXP (operands[1], 0) = operands[0];
2848 }
2849 else
2850 operands[1] = validize_mem (operands[1]);
2851 return false;
2852 }
2853
2854 /* Expand a non-bwx QImode or HImode move instruction;
2855 return true if all work is done. */
2856
2857 bool
2858 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2859 {
2860 /* If the output is not a register, the input must be. */
2861 if (GET_CODE (operands[0]) == MEM)
2862 operands[1] = force_reg (mode, operands[1]);
2863
2864 /* Handle four memory cases, unaligned and aligned for either the input
2865 or the output. The only case where we can be called during reload is
2866 for aligned loads; all other cases require temporaries. */
2867
2868 if (GET_CODE (operands[1]) == MEM
2869 || (GET_CODE (operands[1]) == SUBREG
2870 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2871 || (reload_in_progress && GET_CODE (operands[1]) == REG
2872 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2873 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2874 && GET_CODE (SUBREG_REG (operands[1])) == REG
2875 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2876 {
2877 if (aligned_memory_operand (operands[1], mode))
2878 {
2879 if (reload_in_progress)
2880 {
2881 emit_insn ((mode == QImode
2882 ? gen_reload_inqi_help
2883 : gen_reload_inhi_help)
2884 (operands[0], operands[1],
2885 gen_rtx_REG (SImode, REGNO (operands[0]))));
2886 }
2887 else
2888 {
2889 rtx aligned_mem, bitnum;
2890 rtx scratch = gen_reg_rtx (SImode);
2891 rtx subtarget;
2892 bool copyout;
2893
2894 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2895
2896 subtarget = operands[0];
2897 if (GET_CODE (subtarget) == REG)
2898 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2899 else
2900 subtarget = gen_reg_rtx (DImode), copyout = true;
2901
2902 emit_insn ((mode == QImode
2903 ? gen_aligned_loadqi
2904 : gen_aligned_loadhi)
2905 (subtarget, aligned_mem, bitnum, scratch));
2906
2907 if (copyout)
2908 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2909 }
2910 }
2911 else
2912 {
2913 /* Don't pass these as parameters since that makes the generated
2914 code depend on parameter evaluation order, which will cause
2915 bootstrap failures. */
2916
2917 rtx temp1, temp2, seq, subtarget;
2918 bool copyout;
2919
2920 temp1 = gen_reg_rtx (DImode);
2921 temp2 = gen_reg_rtx (DImode);
2922
2923 subtarget = operands[0];
2924 if (GET_CODE (subtarget) == REG)
2925 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2926 else
2927 subtarget = gen_reg_rtx (DImode), copyout = true;
2928
2929 seq = ((mode == QImode
2930 ? gen_unaligned_loadqi
2931 : gen_unaligned_loadhi)
2932 (subtarget, get_unaligned_address (operands[1], 0),
2933 temp1, temp2));
2934 alpha_set_memflags (seq, operands[1]);
2935 emit_insn (seq);
2936
2937 if (copyout)
2938 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2939 }
2940 return true;
2941 }
2942
2943 if (GET_CODE (operands[0]) == MEM
2944 || (GET_CODE (operands[0]) == SUBREG
2945 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2946 || (reload_in_progress && GET_CODE (operands[0]) == REG
2947 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2948 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2949 && GET_CODE (SUBREG_REG (operands[0])) == REG
2950 && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
2951 {
2952 if (aligned_memory_operand (operands[0], mode))
2953 {
2954 rtx aligned_mem, bitnum;
2955 rtx temp1 = gen_reg_rtx (SImode);
2956 rtx temp2 = gen_reg_rtx (SImode);
2957
2958 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2959
2960 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2961 temp1, temp2));
2962 }
2963 else
2964 {
2965 rtx temp1 = gen_reg_rtx (DImode);
2966 rtx temp2 = gen_reg_rtx (DImode);
2967 rtx temp3 = gen_reg_rtx (DImode);
2968 rtx seq = ((mode == QImode
2969 ? gen_unaligned_storeqi
2970 : gen_unaligned_storehi)
2971 (get_unaligned_address (operands[0], 0),
2972 operands[1], temp1, temp2, temp3));
2973
2974 alpha_set_memflags (seq, operands[0]);
2975 emit_insn (seq);
2976 }
2977 return true;
2978 }
2979
2980 return false;
2981 }
2982
2983 /* Generate an unsigned DImode to FP conversion. This is the same code
2984 optabs would emit if we didn't have TFmode patterns.
2985
2986 For SFmode, this is the only construction I've found that can pass
2987 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2988 intermediates will work, because you'll get intermediate rounding
2989 that ruins the end result. Some of this could be fixed by turning
2990 on round-to-positive-infinity, but that requires diddling the fpsr,
2991 which kills performance. I tried turning this around and converting
2992 to a negative number, so that I could turn on /m, but either I did
2993 it wrong or there's something else going on, because I wound up with
2994 the exact same single-bit error. There is a branch-less form of this same code:
2995
2996 srl $16,1,$1
2997 and $16,1,$2
2998 cmplt $16,0,$3
2999 or $1,$2,$2
3000 cmovge $16,$16,$2
3001 itoft $3,$f10
3002 itoft $2,$f11
3003 cvtqs $f11,$f11
3004 adds $f11,$f11,$f0
3005 fcmoveq $f10,$f11,$f0
3006
3007 I'm not using it because it's the same number of instructions as
3008 this branch-full form, and it has more serialized long latency
3009 instructions on the critical path.
3010
3011 For DFmode, we can avoid rounding errors by breaking up the word
3012 into two pieces, converting them separately, and adding them back:
3013
3014 LC0: .long 0,0x5f800000
3015
3016 itoft $16,$f11
3017 lda $2,LC0
3018 cmplt $16,0,$1
3019 cpyse $f11,$f31,$f10
3020 cpyse $f31,$f11,$f11
3021 s4addq $1,$2,$1
3022 lds $f12,0($1)
3023 cvtqt $f10,$f10
3024 cvtqt $f11,$f11
3025 addt $f12,$f10,$f0
3026 addt $f0,$f11,$f0
3027
3028 This doesn't seem to be a clear-cut win over the optabs form.
3029 It probably all depends on the distribution of numbers being
3030 converted -- in the optabs form, everything except the high-bit-set
3031 case has a much lower minimum execution time. */
3032
3033 void
3034 alpha_emit_floatuns (rtx operands[2])
3035 {
3036 rtx neglab, donelab, i0, i1, f0, in, out;
3037 enum machine_mode mode;
3038
3039 out = operands[0];
3040 in = force_reg (DImode, operands[1]);
3041 mode = GET_MODE (out);
3042 neglab = gen_label_rtx ();
3043 donelab = gen_label_rtx ();
3044 i0 = gen_reg_rtx (DImode);
3045 i1 = gen_reg_rtx (DImode);
3046 f0 = gen_reg_rtx (mode);
3047
3048 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
3049
3050 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
3051 emit_jump_insn (gen_jump (donelab));
3052 emit_barrier ();
3053
3054 emit_label (neglab);
3055
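/* The input has its high bit set, so a straight signed conversion would
   be wrong.  Halve it, folding the discarded bit back into bit 0 so the
   rounding decision is preserved, convert, and double the result.  */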
3056 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
3057 emit_insn (gen_anddi3 (i1, in, const1_rtx));
3058 emit_insn (gen_iordi3 (i0, i0, i1));
3059 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
3060 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
3061
3062 emit_label (donelab);
3063 }
3064
3065 /* Generate the comparison for a conditional branch. */
3066
3067 rtx
3068 alpha_emit_conditional_branch (enum rtx_code code)
3069 {
3070 enum rtx_code cmp_code, branch_code;
3071 enum machine_mode cmp_mode, branch_mode = VOIDmode;
3072 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3073 rtx tem;
3074
3075 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
3076 {
3077 if (! TARGET_HAS_XFLOATING_LIBS)
3078 abort ();
3079
3080 /* X_floating library comparison functions return
3081 -1 unordered
3082 0 false
3083 1 true
3084 Convert the compare against the raw return value. */
3085
3086 switch (code)
3087 {
3088 case UNORDERED:
3089 cmp_code = EQ;
3090 code = LT;
3091 break;
3092 case ORDERED:
3093 cmp_code = EQ;
3094 code = GE;
3095 break;
3096 case NE:
3097 cmp_code = NE;
3098 code = NE;
3099 break;
3100 default:
3101 cmp_code = code;
3102 code = GT;
3103 break;
3104 }
3105
3106 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3107 op1 = const0_rtx;
3108 alpha_compare.fp_p = 0;
3109 }
3110
3111 /* The general case: fold the comparison code to the types of compares
3112 that we have, choosing the branch as necessary. */
3113 switch (code)
3114 {
3115 case EQ: case LE: case LT: case LEU: case LTU:
3116 case UNORDERED:
3117 /* We have these compares: */
3118 cmp_code = code, branch_code = NE;
3119 break;
3120
3121 case NE:
3122 case ORDERED:
3123 /* These must be reversed. */
3124 cmp_code = reverse_condition (code), branch_code = EQ;
3125 break;
3126
3127 case GE: case GT: case GEU: case GTU:
3128 /* For FP, we swap them; for INT, we reverse them. */
3129 if (alpha_compare.fp_p)
3130 {
3131 cmp_code = swap_condition (code);
3132 branch_code = NE;
3133 tem = op0, op0 = op1, op1 = tem;
3134 }
3135 else
3136 {
3137 cmp_code = reverse_condition (code);
3138 branch_code = EQ;
3139 }
3140 break;
3141
3142 default:
3143 abort ();
3144 }
3145
3146 if (alpha_compare.fp_p)
3147 {
3148 cmp_mode = DFmode;
3149 if (flag_unsafe_math_optimizations)
3150 {
3151 /* When we are not as concerned about non-finite values, and we
3152 are comparing against zero, we can branch directly. */
3153 if (op1 == CONST0_RTX (DFmode))
3154 cmp_code = NIL, branch_code = code;
3155 else if (op0 == CONST0_RTX (DFmode))
3156 {
3157 /* Undo the swap we probably did just above. */
3158 tem = op0, op0 = op1, op1 = tem;
3159 branch_code = swap_condition (cmp_code);
3160 cmp_code = NIL;
3161 }
3162 }
3163 else
3164 {
3165 /* ??? We mark the branch mode to be CCmode to prevent the
3166 compare and branch from being combined, since the compare
3167 insn follows IEEE rules that the branch does not. */
3168 branch_mode = CCmode;
3169 }
3170 }
3171 else
3172 {
3173 cmp_mode = DImode;
3174
3175 /* The following optimizations are only for signed compares. */
3176 if (code != LEU && code != LTU && code != GEU && code != GTU)
3177 {
3178 /* Whee. Compare and branch against 0 directly. */
3179 if (op1 == const0_rtx)
3180 cmp_code = NIL, branch_code = code;
3181
3182 /* If the constant doesn't fit into an immediate, but can
3183 be generated by lda/ldah, we adjust the argument and
3184 compare against zero, so we can use beq/bne directly. */
3185 /* ??? Don't do this when comparing against symbols, otherwise
3186 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
3187 be declared false out of hand (at least for non-weak). */
3188 else if (GET_CODE (op1) == CONST_INT
3189 && (code == EQ || code == NE)
3190 && !(symbolic_operand (op0, VOIDmode)
3191 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
3192 {
3193 HOST_WIDE_INT v = INTVAL (op1), n = -v;
3194
3195 if (! CONST_OK_FOR_LETTER_P (v, 'I')
3196 && (CONST_OK_FOR_LETTER_P (n, 'K')
3197 || CONST_OK_FOR_LETTER_P (n, 'L')))
3198 {
3199 cmp_code = PLUS, branch_code = code;
3200 op1 = GEN_INT (n);
3201 }
3202 }
3203 }
3204
3205 if (!reg_or_0_operand (op0, DImode))
3206 op0 = force_reg (DImode, op0);
3207 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
3208 op1 = force_reg (DImode, op1);
3209 }
3210
3211 /* Emit an initial compare instruction, if necessary. */
3212 tem = op0;
3213 if (cmp_code != NIL)
3214 {
3215 tem = gen_reg_rtx (cmp_mode);
3216 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
3217 }
3218
3219 /* Zero the operands. */
3220 memset (&alpha_compare, 0, sizeof (alpha_compare));
3221
3222 /* Return the branch comparison. */
3223 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
3224 }
3225
3226 /* Certain simplifications can be done to make invalid setcc operations
3227 valid. Return the final comparison, or NULL if we can't work. */
3228
3229 rtx
3230 alpha_emit_setcc (enum rtx_code code)
3231 {
3232 enum rtx_code cmp_code;
3233 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
3234 int fp_p = alpha_compare.fp_p;
3235 rtx tmp;
3236
3237 /* Zero the operands. */
3238 memset (&alpha_compare, 0, sizeof (alpha_compare));
3239
3240 if (fp_p && GET_MODE (op0) == TFmode)
3241 {
3242 if (! TARGET_HAS_XFLOATING_LIBS)
3243 abort ();
3244
3245 /* X_floating library comparison functions return
3246 -1 unordered
3247 0 false
3248 1 true
3249 Convert the compare against the raw return value. */
3250
3251 if (code == UNORDERED || code == ORDERED)
3252 cmp_code = EQ;
3253 else
3254 cmp_code = code;
3255
3256 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
3257 op1 = const0_rtx;
3258 fp_p = 0;
3259
3260 if (code == UNORDERED)
3261 code = LT;
3262 else if (code == ORDERED)
3263 code = GE;
3264 else
3265 code = GT;
3266 }
3267
3268 if (fp_p && !TARGET_FIX)
3269 return NULL_RTX;
3270
3271 /* The general case: fold the comparison code to the types of compares
3272 that we have, choosing the branch as necessary. */
3273
3274 cmp_code = NIL;
3275 switch (code)
3276 {
3277 case EQ: case LE: case LT: case LEU: case LTU:
3278 case UNORDERED:
3279 /* We have these compares. */
3280 if (fp_p)
3281 cmp_code = code, code = NE;
3282 break;
3283
3284 case NE:
3285 if (!fp_p && op1 == const0_rtx)
3286 break;
3287 /* FALLTHRU */
3288
3289 case ORDERED:
3290 cmp_code = reverse_condition (code);
3291 code = EQ;
3292 break;
3293
3294 case GE: case GT: case GEU: case GTU:
3295 /* These normally need swapping, but for integer zero we have
3296 special patterns that recognize swapped operands. */
3297 if (!fp_p && op1 == const0_rtx)
3298 break;
3299 code = swap_condition (code);
3300 if (fp_p)
3301 cmp_code = code, code = NE;
3302 tmp = op0, op0 = op1, op1 = tmp;
3303 break;
3304
3305 default:
3306 abort ();
3307 }
3308
3309 if (!fp_p)
3310 {
3311 if (!register_operand (op0, DImode))
3312 op0 = force_reg (DImode, op0);
3313 if (!reg_or_8bit_operand (op1, DImode))
3314 op1 = force_reg (DImode, op1);
3315 }
3316
3317 /* Emit an initial compare instruction, if necessary. */
3318 if (cmp_code != NIL)
3319 {
3320 enum machine_mode mode = fp_p ? DFmode : DImode;
3321
3322 tmp = gen_reg_rtx (mode);
3323 emit_insn (gen_rtx_SET (VOIDmode, tmp,
3324 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
3325
3326 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
3327 op1 = const0_rtx;
3328 }
3329
3330 /* Return the setcc comparison. */
3331 return gen_rtx_fmt_ee (code, DImode, op0, op1);
3332 }
3333
3334
3335 /* Rewrite a comparison against zero CMP of the form
3336 (CODE (cc0) (const_int 0)) so it can be written validly in
3337 a conditional move (if_then_else CMP ...).
3338 If both of the operands that set cc0 are nonzero we must emit
3339 an insn to perform the compare (it can't be done within
3340 the conditional move). */
3341
3342 rtx
3343 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
3344 {
3345 enum rtx_code code = GET_CODE (cmp);
3346 enum rtx_code cmov_code = NE;
3347 rtx op0 = alpha_compare.op0;
3348 rtx op1 = alpha_compare.op1;
3349 int fp_p = alpha_compare.fp_p;
3350 enum machine_mode cmp_mode
3351 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
3352 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
3353 enum machine_mode cmov_mode = VOIDmode;
3354 int local_fast_math = flag_unsafe_math_optimizations;
3355 rtx tem;
3356
3357 /* Zero the operands. */
3358 memset (&alpha_compare, 0, sizeof (alpha_compare));
3359
3360 if (fp_p != FLOAT_MODE_P (mode))
3361 {
3362 enum rtx_code cmp_code;
3363
3364 if (! TARGET_FIX)
3365 return 0;
3366
3367 /* If we have fp<->int register move instructions, do a cmov by
3368 performing the comparison in fp registers, and move the
3369 zero/nonzero value to integer registers, where we can then
3370 use a normal cmov, or vice-versa. */
3371
3372 switch (code)
3373 {
3374 case EQ: case LE: case LT: case LEU: case LTU:
3375 /* We have these compares. */
3376 cmp_code = code, code = NE;
3377 break;
3378
3379 case NE:
3380 /* This must be reversed. */
3381 cmp_code = EQ, code = EQ;
3382 break;
3383
3384 case GE: case GT: case GEU: case GTU:
3385 /* These normally need swapping, but for integer zero we have
3386 special patterns that recognize swapped operands. */
3387 if (!fp_p && op1 == const0_rtx)
3388 cmp_code = code, code = NE;
3389 else
3390 {
3391 cmp_code = swap_condition (code);
3392 code = NE;
3393 tem = op0, op0 = op1, op1 = tem;
3394 }
3395 break;
3396
3397 default:
3398 abort ();
3399 }
3400
3401 tem = gen_reg_rtx (cmp_op_mode);
3402 emit_insn (gen_rtx_SET (VOIDmode, tem,
3403 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
3404 op0, op1)));
3405
3406 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
3407 op0 = gen_lowpart (cmp_op_mode, tem);
3408 op1 = CONST0_RTX (cmp_op_mode);
3409 fp_p = !fp_p;
3410 local_fast_math = 1;
3411 }
3412
3413 /* We may be able to use a conditional move directly.
3414 This avoids emitting spurious compares. */
3415 if (signed_comparison_operator (cmp, VOIDmode)
3416 && (!fp_p || local_fast_math)
3417 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
3418 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3419
3420 /* We can't put the comparison inside the conditional move;
3421 emit a compare instruction and put that inside the
3422 conditional move. Make sure we emit only comparisons we have;
3423 swap or reverse as necessary. */
3424
3425 if (no_new_pseudos)
3426 return NULL_RTX;
3427
3428 switch (code)
3429 {
3430 case EQ: case LE: case LT: case LEU: case LTU:
3431 /* We have these compares: */
3432 break;
3433
3434 case NE:
3435 /* This must be reversed. */
3436 code = reverse_condition (code);
3437 cmov_code = EQ;
3438 break;
3439
3440 case GE: case GT: case GEU: case GTU:
3441 /* These must be swapped. */
3442 if (op1 != CONST0_RTX (cmp_mode))
3443 {
3444 code = swap_condition (code);
3445 tem = op0, op0 = op1, op1 = tem;
3446 }
3447 break;
3448
3449 default:
3450 abort ();
3451 }
3452
3453 if (!fp_p)
3454 {
3455 if (!reg_or_0_operand (op0, DImode))
3456 op0 = force_reg (DImode, op0);
3457 if (!reg_or_8bit_operand (op1, DImode))
3458 op1 = force_reg (DImode, op1);
3459 }
3460
3461 /* ??? We mark the branch mode to be CCmode to prevent the compare
3462 and cmov from being combined, since the compare insn follows IEEE
3463 rules that the cmov does not. */
3464 if (fp_p && !local_fast_math)
3465 cmov_mode = CCmode;
3466
3467 tem = gen_reg_rtx (cmp_op_mode);
3468 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
3469 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
3470 }
3471
3472 /* Simplify a conditional move of two constants into a setcc with
3473 arithmetic. This is done with a splitter since combine would
3474 just undo the work if done during code generation. It also catches
3475 cases we wouldn't have before cse. */
3476
3477 int
3478 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
3479 rtx t_rtx, rtx f_rtx)
3480 {
3481 HOST_WIDE_INT t, f, diff;
3482 enum machine_mode mode;
3483 rtx target, subtarget, tmp;
3484
3485 mode = GET_MODE (dest);
3486 t = INTVAL (t_rtx);
3487 f = INTVAL (f_rtx);
3488 diff = t - f;
3489
3490 if (((code == NE || code == EQ) && diff < 0)
3491 || (code == GE || code == GT))
3492 {
3493 code = reverse_condition (code);
3494 diff = t, t = f, f = diff;
3495 diff = t - f;
3496 }
3497
3498 subtarget = target = dest;
3499 if (mode != DImode)
3500 {
3501 target = gen_lowpart (DImode, dest);
3502 if (! no_new_pseudos)
3503 subtarget = gen_reg_rtx (DImode);
3504 else
3505 subtarget = target;
3506 }
3507 /* Below, we must be careful to use copy_rtx on target and subtarget
3508 in intermediate insns, as they may be a subreg rtx, which may not
3509 be shared. */
3510
3511 if (f == 0 && exact_log2 (diff) > 0
3512 /* On EV6, we've got enough shifters to make non-arithmetic shifts
3513 viable over a longer latency cmove. On EV5, the E0 slot is a
3514 scarce resource, and on EV4 shift has the same latency as a cmove. */
3515 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
3516 {
3517 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3518 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3519
3520 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
3521 GEN_INT (exact_log2 (t)));
3522 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3523 }
3524 else if (f == 0 && t == -1)
3525 {
3526 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3527 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3528
3529 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
3530 }
3531 else if (diff == 1 || diff == 4 || diff == 8)
3532 {
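/* Materialize the condition as 0 or 1 and compute F + COND * DIFF.
   DIFF of 1 is a plain add; 4 and 8 map onto the scaled-add
   (s4addq/s8addq) patterns when F is a valid operand for them.  */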
3533 rtx add_op;
3534
3535 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
3536 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
3537
3538 if (diff == 1)
3539 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
3540 else
3541 {
3542 add_op = GEN_INT (f);
3543 if (sext_add_operand (add_op, mode))
3544 {
3545 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
3546 GEN_INT (diff));
3547 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
3548 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
3549 }
3550 else
3551 return 0;
3552 }
3553 }
3554 else
3555 return 0;
3556
3557 return 1;
3558 }
3559 \f
3560 /* Look up the function X_floating library function name for the
3561 given operation. */
3562
3563 static const char *
3564 alpha_lookup_xfloating_lib_func (enum rtx_code code)
3565 {
3566 struct xfloating_op
3567 {
3568 const enum rtx_code code;
3569 const char *const func;
3570 };
3571
3572 static const struct xfloating_op vms_xfloating_ops[] =
3573 {
3574 { PLUS, "OTS$ADD_X" },
3575 { MINUS, "OTS$SUB_X" },
3576 { MULT, "OTS$MUL_X" },
3577 { DIV, "OTS$DIV_X" },
3578 { EQ, "OTS$EQL_X" },
3579 { NE, "OTS$NEQ_X" },
3580 { LT, "OTS$LSS_X" },
3581 { LE, "OTS$LEQ_X" },
3582 { GT, "OTS$GTR_X" },
3583 { GE, "OTS$GEQ_X" },
3584 { FIX, "OTS$CVTXQ" },
3585 { FLOAT, "OTS$CVTQX" },
3586 { UNSIGNED_FLOAT, "OTS$CVTQUX" },
3587 { FLOAT_EXTEND, "OTS$CVT_FLOAT_T_X" },
3588 { FLOAT_TRUNCATE, "OTS$CVT_FLOAT_X_T" },
3589 };
3590
3591 static const struct xfloating_op osf_xfloating_ops[] =
3592 {
3593 { PLUS, "_OtsAddX" },
3594 { MINUS, "_OtsSubX" },
3595 { MULT, "_OtsMulX" },
3596 { DIV, "_OtsDivX" },
3597 { EQ, "_OtsEqlX" },
3598 { NE, "_OtsNeqX" },
3599 { LT, "_OtsLssX" },
3600 { LE, "_OtsLeqX" },
3601 { GT, "_OtsGtrX" },
3602 { GE, "_OtsGeqX" },
3603 { FIX, "_OtsCvtXQ" },
3604 { FLOAT, "_OtsCvtQX" },
3605 { UNSIGNED_FLOAT, "_OtsCvtQUX" },
3606 { FLOAT_EXTEND, "_OtsConvertFloatTX" },
3607 { FLOAT_TRUNCATE, "_OtsConvertFloatXT" },
3608 };
3609
3610 const struct xfloating_op *ops;
3611 const long n = ARRAY_SIZE (osf_xfloating_ops);
3612 long i;
3613
3614 /* How irritating. Nothing to key off for the table. Hardcode
3615 knowledge of the G_floating routines. */
3616 if (TARGET_FLOAT_VAX)
3617 {
3618 if (TARGET_ABI_OPEN_VMS)
3619 {
3620 if (code == FLOAT_EXTEND)
3621 return "OTS$CVT_FLOAT_G_X";
3622 if (code == FLOAT_TRUNCATE)
3623 return "OTS$CVT_FLOAT_X_G";
3624 }
3625 else
3626 {
3627 if (code == FLOAT_EXTEND)
3628 return "_OtsConvertFloatGX";
3629 if (code == FLOAT_TRUNCATE)
3630 return "_OtsConvertFloatXG";
3631 }
3632 }
3633
3634 if (TARGET_ABI_OPEN_VMS)
3635 ops = vms_xfloating_ops;
3636 else
3637 ops = osf_xfloating_ops;
3638
3639 for (i = 0; i < n; ++i)
3640 if (ops[i].code == code)
3641 return ops[i].func;
3642
3643 abort ();
3644 }
3645
3646 /* Most X_floating operations take the rounding mode as an argument.
3647 Compute that here. */
3648
3649 static int
3650 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3651 enum alpha_fp_rounding_mode round)
3652 {
3653 int mode;
3654
3655 switch (round)
3656 {
3657 case ALPHA_FPRM_NORM:
3658 mode = 2;
3659 break;
3660 case ALPHA_FPRM_MINF:
3661 mode = 1;
3662 break;
3663 case ALPHA_FPRM_CHOP:
3664 mode = 0;
3665 break;
3666 case ALPHA_FPRM_DYN:
3667 mode = 4;
3668 break;
3669 default:
3670 abort ();
3671
3672 /* XXX For reference, round to +inf is mode = 3. */
3673 }
3674
3675 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3676 mode |= 0x10000;
3677
3678 return mode;
3679 }
3680
3681 /* Emit an X_floating library function call.
3682
3683 Note that these functions do not follow normal calling conventions:
3684 TFmode arguments are passed in two integer registers (as opposed to
3685 indirect); TFmode return values appear in R16+R17.
3686
3687 FUNC is the function name to call.
3688 TARGET is where the output belongs.
3689 OPERANDS are the inputs.
3690 NOPERANDS is the count of inputs.
3691 EQUIV is the expression equivalent for the function.
3692 */
3693
3694 static void
3695 alpha_emit_xfloating_libcall (const char *func, rtx target, rtx operands[],
3696 int noperands, rtx equiv)
3697 {
3698 rtx usage = NULL_RTX, tmp, reg;
3699 int regno = 16, i;
3700
3701 start_sequence ();
3702
3703 for (i = 0; i < noperands; ++i)
3704 {
3705 switch (GET_MODE (operands[i]))
3706 {
3707 case TFmode:
3708 reg = gen_rtx_REG (TFmode, regno);
3709 regno += 2;
3710 break;
3711
3712 case DFmode:
3713 reg = gen_rtx_REG (DFmode, regno + 32);
3714 regno += 1;
3715 break;
3716
3717 case VOIDmode:
3718 if (GET_CODE (operands[i]) != CONST_INT)
3719 abort ();
3720 /* FALLTHRU */
3721 case DImode:
3722 reg = gen_rtx_REG (DImode, regno);
3723 regno += 1;
3724 break;
3725
3726 default:
3727 abort ();
3728 }
3729
3730 emit_move_insn (reg, operands[i]);
3731 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3732 }
3733
3734 switch (GET_MODE (target))
3735 {
3736 case TFmode:
3737 reg = gen_rtx_REG (TFmode, 16);
3738 break;
3739 case DFmode:
3740 reg = gen_rtx_REG (DFmode, 32);
3741 break;
3742 case DImode:
3743 reg = gen_rtx_REG (DImode, 0);
3744 break;
3745 default:
3746 abort ();
3747 }
3748
3749 tmp = gen_rtx_MEM (QImode, init_one_libfunc (func));
3750 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3751 const0_rtx, const0_rtx));
3752 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3753
3754 tmp = get_insns ();
3755 end_sequence ();
3756
3757 emit_libcall_block (tmp, target, reg, equiv);
3758 }
3759
3760 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3761
3762 void
3763 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3764 {
3765 const char *func;
3766 int mode;
3767 rtx out_operands[3];
3768
3769 func = alpha_lookup_xfloating_lib_func (code);
3770 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3771
3772 out_operands[0] = operands[1];
3773 out_operands[1] = operands[2];
3774 out_operands[2] = GEN_INT (mode);
3775 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3776 gen_rtx_fmt_ee (code, TFmode, operands[1],
3777 operands[2]));
3778 }
3779
3780 /* Emit an X_floating library function call for a comparison. */
3781
3782 static rtx
3783 alpha_emit_xfloating_compare (enum rtx_code code, rtx op0, rtx op1)
3784 {
3785 const char *func;
3786 rtx out, operands[2];
3787
3788 func = alpha_lookup_xfloating_lib_func (code);
3789
3790 operands[0] = op0;
3791 operands[1] = op1;
3792 out = gen_reg_rtx (DImode);
3793
3794 /* ??? Strange mode for equiv because what's actually returned
3795 is -1,0,1, not a proper boolean value. */
3796 alpha_emit_xfloating_libcall (func, out, operands, 2,
3797 gen_rtx_fmt_ee (code, CCmode, op0, op1));
3798
3799 return out;
3800 }
3801
3802 /* Emit an X_floating library function call for a conversion. */
3803
3804 void
3805 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3806 {
3807 int noperands = 1, mode;
3808 rtx out_operands[2];
3809 const char *func;
3810 enum rtx_code code = orig_code;
3811
3812 if (code == UNSIGNED_FIX)
3813 code = FIX;
3814
3815 func = alpha_lookup_xfloating_lib_func (code);
3816
3817 out_operands[0] = operands[1];
3818
3819 switch (code)
3820 {
3821 case FIX:
3822 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3823 out_operands[1] = GEN_INT (mode);
3824 noperands = 2;
3825 break;
3826 case FLOAT_TRUNCATE:
3827 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3828 out_operands[1] = GEN_INT (mode);
3829 noperands = 2;
3830 break;
3831 default:
3832 break;
3833 }
3834
3835 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3836 gen_rtx_fmt_e (orig_code,
3837 GET_MODE (operands[0]),
3838 operands[1]));
3839 }
3840
3841 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3842 OP[0] into OP[0,1]. Naturally, output operand ordering is
3843 little-endian. */
3844
3845 void
3846 alpha_split_tfmode_pair (rtx operands[4])
3847 {
3848 if (GET_CODE (operands[1]) == REG)
3849 {
3850 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3851 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3852 }
3853 else if (GET_CODE (operands[1]) == MEM)
3854 {
3855 operands[3] = adjust_address (operands[1], DImode, 8);
3856 operands[2] = adjust_address (operands[1], DImode, 0);
3857 }
3858 else if (operands[1] == CONST0_RTX (TFmode))
3859 operands[2] = operands[3] = const0_rtx;
3860 else
3861 abort ();
3862
3863 if (GET_CODE (operands[0]) == REG)
3864 {
3865 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3866 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3867 }
3868 else if (GET_CODE (operands[0]) == MEM)
3869 {
3870 operands[1] = adjust_address (operands[0], DImode, 8);
3871 operands[0] = adjust_address (operands[0], DImode, 0);
3872 }
3873 else
3874 abort ();
3875 }
3876
3877 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3878 op2 is a register containing the sign bit, operation is the
3879 logical operation to be performed. */
3880
3881 void
3882 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3883 {
3884 rtx high_bit = operands[2];
3885 rtx scratch;
3886 int move;
3887
3888 alpha_split_tfmode_pair (operands);
3889
3890 /* Detect three flavors of operand overlap. */
3891 move = 1;
3892 if (rtx_equal_p (operands[0], operands[2]))
3893 move = 0;
3894 else if (rtx_equal_p (operands[1], operands[2]))
3895 {
3896 if (rtx_equal_p (operands[0], high_bit))
3897 move = 2;
3898 else
3899 move = -1;
3900 }
3901
3902 if (move < 0)
3903 emit_move_insn (operands[0], operands[2]);
3904
3905 /* ??? If the destination overlaps both source tf and high_bit, then
3906 assume source tf is dead in its entirety and use the other half
3907 for a scratch register. Otherwise "scratch" is just the proper
3908 destination register. */
3909 scratch = operands[move < 2 ? 1 : 3];
3910
3911 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3912
3913 if (move > 0)
3914 {
3915 emit_move_insn (operands[0], operands[2]);
3916 if (move > 1)
3917 emit_move_insn (operands[1], scratch);
3918 }
3919 }
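/* Note (added for clarity): the callers in the machine description are
   expected to pass an exclusive-or style generator for negtf2 and an
   and-not style generator for abstf2, so that only the sign bit held in
   operands[2] is flipped or cleared in the high word.  The helper itself
   simply applies whatever three-operand generator it is given.  */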
3920 \f
3921 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3922 unaligned data:
3923
3924 unsigned: signed:
3925 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3926 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3927 lda r3,X(r11) lda r3,X+2(r11)
3928 extwl r1,r3,r1 extql r1,r3,r1
3929 extwh r2,r3,r2 extqh r2,r3,r2
3930 or r1,r2,r1 or r1,r2,r1
3931 sra r1,48,r1
3932
3933 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3934 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3935 lda r3,X(r11) lda r3,X(r11)
3936 extll r1,r3,r1 extll r1,r3,r1
3937 extlh r2,r3,r2 extlh r2,r3,r2
3938 or r1,r2,r1 addl r1,r2,r1
3939
3940 quad: ldq_u r1,X(r11)
3941 ldq_u r2,X+7(r11)
3942 lda r3,X(r11)
3943 extql r1,r3,r1
3944 extqh r2,r3,r2
3945 or r1,r2,r1
3946 */
3947
3948 void
3949 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3950 HOST_WIDE_INT ofs, int sign)
3951 {
3952 rtx meml, memh, addr, extl, exth, tmp, mema;
3953 enum machine_mode mode;
3954
3955 meml = gen_reg_rtx (DImode);
3956 memh = gen_reg_rtx (DImode);
3957 addr = gen_reg_rtx (DImode);
3958 extl = gen_reg_rtx (DImode);
3959 exth = gen_reg_rtx (DImode);
3960
3961 mema = XEXP (mem, 0);
3962 if (GET_CODE (mema) == LO_SUM)
3963 mema = force_reg (Pmode, mema);
3964
3965 /* AND addresses cannot be in any alias set, since they may implicitly
3966 alias surrounding code. Ideally we'd have some alias set that
3967 covered all types except those with alignment 8 or higher. */
3968
3969 tmp = change_address (mem, DImode,
3970 gen_rtx_AND (DImode,
3971 plus_constant (mema, ofs),
3972 GEN_INT (-8)));
3973 set_mem_alias_set (tmp, 0);
3974 emit_move_insn (meml, tmp);
3975
3976 tmp = change_address (mem, DImode,
3977 gen_rtx_AND (DImode,
3978 plus_constant (mema, ofs + size - 1),
3979 GEN_INT (-8)));
3980 set_mem_alias_set (tmp, 0);
3981 emit_move_insn (memh, tmp);
3982
3983 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3984 {
3985 emit_move_insn (addr, plus_constant (mema, -1));
3986
3987 emit_insn (gen_extqh_be (extl, meml, addr));
3988 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3989
3990 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3991 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3992 addr, 1, OPTAB_WIDEN);
3993 }
3994 else if (sign && size == 2)
3995 {
3996 emit_move_insn (addr, plus_constant (mema, ofs+2));
3997
3998 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3999 emit_insn (gen_extqh_le (exth, memh, addr));
4000
4001 /* We must use tgt here for the target. The Alpha/VMS port fails if we
4002 use addr for the target, because addr is marked as a pointer and
4003 combine knows that pointers are always sign-extended 32-bit values. */
4004 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
4005 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
4006 addr, 1, OPTAB_WIDEN);
4007 }
4008 else
4009 {
4010 if (WORDS_BIG_ENDIAN)
4011 {
4012 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
4013 switch ((int) size)
4014 {
4015 case 2:
4016 emit_insn (gen_extwh_be (extl, meml, addr));
4017 mode = HImode;
4018 break;
4019
4020 case 4:
4021 emit_insn (gen_extlh_be (extl, meml, addr));
4022 mode = SImode;
4023 break;
4024
4025 case 8:
4026 emit_insn (gen_extqh_be (extl, meml, addr));
4027 mode = DImode;
4028 break;
4029
4030 default:
4031 abort ();
4032 }
4033 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
4034 }
4035 else
4036 {
4037 emit_move_insn (addr, plus_constant (mema, ofs));
4038 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
4039 switch ((int) size)
4040 {
4041 case 2:
4042 emit_insn (gen_extwh_le (exth, memh, addr));
4043 mode = HImode;
4044 break;
4045
4046 case 4:
4047 emit_insn (gen_extlh_le (exth, memh, addr));
4048 mode = SImode;
4049 break;
4050
4051 case 8:
4052 emit_insn (gen_extqh_le (exth, memh, addr));
4053 mode = DImode;
4054 break;
4055
4056 default:
4057 abort ();
4058 }
4059 }
4060
4061 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
4062 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
4063 sign, OPTAB_WIDEN);
4064 }
4065
4066 if (addr != tgt)
4067 emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
4068 }
4069
4070 /* Similarly, use ins and msk instructions to perform unaligned stores. */
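/* For reference (editor's sketch, mirroring the handbook style of the
   extraction comment above), the unaligned longword store sequence is
   roughly the following on little-endian, where r5 holds the value being
   stored; the expander below emits equivalent RTL rather than literal
   assembly:

	ldq_u r2,X+3(r11)
	ldq_u r1,X(r11)
	lda r3,X(r11)
	inslh r5,r3,r4
	insll r5,r3,r6
	msklh r2,r3,r2
	mskll r1,r3,r1
	or r2,r4,r2
	or r1,r6,r1
	stq_u r2,X+3(r11)
	stq_u r1,X(r11)  */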
4071
4072 void
4073 alpha_expand_unaligned_store (rtx dst, rtx src,
4074 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
4075 {
4076 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
4077
4078 dstl = gen_reg_rtx (DImode);
4079 dsth = gen_reg_rtx (DImode);
4080 insl = gen_reg_rtx (DImode);
4081 insh = gen_reg_rtx (DImode);
4082
4083 dsta = XEXP (dst, 0);
4084 if (GET_CODE (dsta) == LO_SUM)
4085 dsta = force_reg (Pmode, dsta);
4086
4087 /* AND addresses cannot be in any alias set, since they may implicitly
4088 alias surrounding code. Ideally we'd have some alias set that
4089 covered all types except those with alignment 8 or higher. */
4090
4091 meml = change_address (dst, DImode,
4092 gen_rtx_AND (DImode,
4093 plus_constant (dsta, ofs),
4094 GEN_INT (-8)));
4095 set_mem_alias_set (meml, 0);
4096
4097 memh = change_address (dst, DImode,
4098 gen_rtx_AND (DImode,
4099 plus_constant (dsta, ofs + size - 1),
4100 GEN_INT (-8)));
4101 set_mem_alias_set (memh, 0);
4102
4103 emit_move_insn (dsth, memh);
4104 emit_move_insn (dstl, meml);
4105 if (WORDS_BIG_ENDIAN)
4106 {
4107 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
4108
4109 if (src != const0_rtx)
4110 {
4111 switch ((int) size)
4112 {
4113 case 2:
4114 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
4115 break;
4116 case 4:
4117 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
4118 break;
4119 case 8:
4120 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
4121 break;
4122 }
4123 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
4124 GEN_INT (size*8), addr));
4125 }
4126
4127 switch ((int) size)
4128 {
4129 case 2:
4130 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
4131 break;
4132 case 4:
4133 {
4134 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4135 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
4136 break;
4137 }
4138 case 8:
4139 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
4140 break;
4141 }
4142
4143 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
4144 }
4145 else
4146 {
4147 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
4148
4149 if (src != const0_rtx)
4150 {
4151 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
4152 GEN_INT (size*8), addr));
4153
4154 switch ((int) size)
4155 {
4156 case 2:
4157 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
4158 break;
4159 case 4:
4160 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
4161 break;
4162 case 8:
4163 emit_insn (gen_insql_le (insl, src, addr));
4164 break;
4165 }
4166 }
4167
4168 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
4169
4170 switch ((int) size)
4171 {
4172 case 2:
4173 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
4174 break;
4175 case 4:
4176 {
4177 rtx msk = immed_double_const (0xffffffff, 0, DImode);
4178 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
4179 break;
4180 }
4181 case 8:
4182 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
4183 break;
4184 }
4185 }
4186
4187 if (src != const0_rtx)
4188 {
4189 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
4190 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
4191 }
4192
4193 if (WORDS_BIG_ENDIAN)
4194 {
4195 emit_move_insn (meml, dstl);
4196 emit_move_insn (memh, dsth);
4197 }
4198 else
4199 {
4200 /* Must store high before low for the degenerate case where the
4201 address is actually aligned. */
4201 emit_move_insn (memh, dsth);
4202 emit_move_insn (meml, dstl);
4203 }
4204 }
4205
4206 /* The block move code tries to maximize speed by separating loads and
4207 stores at the expense of register pressure: we load all of the data
4208 before we store it back out. Two secondary effects are worth
4209 mentioning: it speeds copying to and from both aligned and unaligned
4210 buffers, and it makes the code significantly easier to write. */
4211
4212 #define MAX_MOVE_WORDS 8
4213
4214 /* Load an integral number of consecutive unaligned quadwords. */
4215
4216 static void
4217 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
4218 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
4219 {
4220 rtx const im8 = GEN_INT (-8);
4221 rtx const i64 = GEN_INT (64);
4222 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
4223 rtx sreg, areg, tmp, smema;
4224 HOST_WIDE_INT i;
4225
4226 smema = XEXP (smem, 0);
4227 if (GET_CODE (smema) == LO_SUM)
4228 smema = force_reg (Pmode, smema);
4229
4230 /* Generate all the tmp registers we need. */
4231 for (i = 0; i < words; ++i)
4232 {
4233 data_regs[i] = out_regs[i];
4234 ext_tmps[i] = gen_reg_rtx (DImode);
4235 }
4236 data_regs[words] = gen_reg_rtx (DImode);
4237
4238 if (ofs != 0)
4239 smem = adjust_address (smem, GET_MODE (smem), ofs);
4240
4241 /* Load up all of the source data. */
4242 for (i = 0; i < words; ++i)
4243 {
4244 tmp = change_address (smem, DImode,
4245 gen_rtx_AND (DImode,
4246 plus_constant (smema, 8*i),
4247 im8));
4248 set_mem_alias_set (tmp, 0);
4249 emit_move_insn (data_regs[i], tmp);
4250 }
4251
4252 tmp = change_address (smem, DImode,
4253 gen_rtx_AND (DImode,
4254 plus_constant (smema, 8*words - 1),
4255 im8));
4256 set_mem_alias_set (tmp, 0);
4257 emit_move_insn (data_regs[words], tmp);
4258
4259 /* Extract the half-word fragments. Unfortunately DEC decided to make
4260 extxh with offset zero a noop instead of zeroing the register, so
4261 we must take care of that edge condition ourselves with cmov. */
4262
4263 sreg = copy_addr_to_reg (smema);
4264 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
4265 1, OPTAB_WIDEN);
4266 if (WORDS_BIG_ENDIAN)
4267 emit_move_insn (sreg, plus_constant (sreg, 7));
4268 for (i = 0; i < words; ++i)
4269 {
4270 if (WORDS_BIG_ENDIAN)
4271 {
4272 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
4273 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
4274 }
4275 else
4276 {
4277 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
4278 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
4279 }
4280 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
4281 gen_rtx_IF_THEN_ELSE (DImode,
4282 gen_rtx_EQ (DImode, areg,
4283 const0_rtx),
4284 const0_rtx, ext_tmps[i])));
4285 }
4286
4287 /* Merge the half-words into whole words. */
4288 for (i = 0; i < words; ++i)
4289 {
4290 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
4291 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
4292 }
4293 }
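/* Illustrative note: for words == 2 the loops above emit three ldq_u
   style loads covering the 16 unaligned source bytes plus the trailing
   quadword, and each destination word is then assembled from an extxl of
   one loaded quadword or'd with an extqh of the next, with the cmov
   zeroing the extqh half when the source happens to be aligned.  */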
4294
4295 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
4296 may be NULL to store zeros. */
4297
4298 static void
4299 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
4300 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
4301 {
4302 rtx const im8 = GEN_INT (-8);
4303 rtx const i64 = GEN_INT (64);
4304 rtx ins_tmps[MAX_MOVE_WORDS];
4305 rtx st_tmp_1, st_tmp_2, dreg;
4306 rtx st_addr_1, st_addr_2, dmema;
4307 HOST_WIDE_INT i;
4308
4309 dmema = XEXP (dmem, 0);
4310 if (GET_CODE (dmema) == LO_SUM)
4311 dmema = force_reg (Pmode, dmema);
4312
4313 /* Generate all the tmp registers we need. */
4314 if (data_regs != NULL)
4315 for (i = 0; i < words; ++i)
4316 ins_tmps[i] = gen_reg_rtx(DImode);
4317 st_tmp_1 = gen_reg_rtx(DImode);
4318 st_tmp_2 = gen_reg_rtx(DImode);
4319
4320 if (ofs != 0)
4321 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
4322
4323 st_addr_2 = change_address (dmem, DImode,
4324 gen_rtx_AND (DImode,
4325 plus_constant (dmema, words*8 - 1),
4326 im8));
4327 set_mem_alias_set (st_addr_2, 0);
4328
4329 st_addr_1 = change_address (dmem, DImode,
4330 gen_rtx_AND (DImode, dmema, im8));
4331 set_mem_alias_set (st_addr_1, 0);
4332
4333 /* Load up the destination end bits. */
4334 emit_move_insn (st_tmp_2, st_addr_2);
4335 emit_move_insn (st_tmp_1, st_addr_1);
4336
4337 /* Shift the input data into place. */
4338 dreg = copy_addr_to_reg (dmema);
4339 if (WORDS_BIG_ENDIAN)
4340 emit_move_insn (dreg, plus_constant (dreg, 7));
4341 if (data_regs != NULL)
4342 {
4343 for (i = words-1; i >= 0; --i)
4344 {
4345 if (WORDS_BIG_ENDIAN)
4346 {
4347 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
4348 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
4349 }
4350 else
4351 {
4352 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
4353 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
4354 }
4355 }
4356 for (i = words-1; i > 0; --i)
4357 {
4358 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
4359 ins_tmps[i-1], ins_tmps[i-1], 1,
4360 OPTAB_WIDEN);
4361 }
4362 }
4363
4364 /* Split and merge the ends with the destination data. */
4365 if (WORDS_BIG_ENDIAN)
4366 {
4367 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
4368 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
4369 }
4370 else
4371 {
4372 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
4373 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
4374 }
4375
4376 if (data_regs != NULL)
4377 {
4378 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
4379 st_tmp_2, 1, OPTAB_WIDEN);
4380 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
4381 st_tmp_1, 1, OPTAB_WIDEN);
4382 }
4383
4384 /* Store it all. */
4385 if (WORDS_BIG_ENDIAN)
4386 emit_move_insn (st_addr_1, st_tmp_1);
4387 else
4388 emit_move_insn (st_addr_2, st_tmp_2);
4389 for (i = words-1; i > 0; --i)
4390 {
4391 rtx tmp = change_address (dmem, DImode,
4392 gen_rtx_AND (DImode,
4393 plus_constant(dmema,
4394 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
4395 im8));
4396 set_mem_alias_set (tmp, 0);
4397 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
4398 }
4399 if (WORDS_BIG_ENDIAN)
4400 emit_move_insn (st_addr_2, st_tmp_2);
4401 else
4402 emit_move_insn (st_addr_1, st_tmp_1);
4403 }
4404
4405
4406 /* Expand string/block move operations.
4407
4408 operands[0] is the pointer to the destination.
4409 operands[1] is the pointer to the source.
4410 operands[2] is the number of bytes to move.
4411 operands[3] is the alignment. */
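/* Returns 1 if the move was expanded inline and 0 if the caller should
   fall back on a library call.  Blocks larger than
   MAX_MOVE_WORDS * UNITS_PER_WORD (64) bytes are always punted.  */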
4412
4413 int
4414 alpha_expand_block_move (rtx operands[])
4415 {
4416 rtx bytes_rtx = operands[2];
4417 rtx align_rtx = operands[3];
4418 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4419 HOST_WIDE_INT bytes = orig_bytes;
4420 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
4421 HOST_WIDE_INT dst_align = src_align;
4422 rtx orig_src = operands[1];
4423 rtx orig_dst = operands[0];
4424 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
4425 rtx tmp;
4426 unsigned int i, words, ofs, nregs = 0;
4427
4428 if (orig_bytes <= 0)
4429 return 1;
4430 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4431 return 0;
4432
4433 /* Look for additional alignment information from recorded register info. */
4434
4435 tmp = XEXP (orig_src, 0);
4436 if (GET_CODE (tmp) == REG)
4437 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4438 else if (GET_CODE (tmp) == PLUS
4439 && GET_CODE (XEXP (tmp, 0)) == REG
4440 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4441 {
4442 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4443 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4444
4445 if (a > src_align)
4446 {
4447 if (a >= 64 && c % 8 == 0)
4448 src_align = 64;
4449 else if (a >= 32 && c % 4 == 0)
4450 src_align = 32;
4451 else if (a >= 16 && c % 2 == 0)
4452 src_align = 16;
4453 }
4454 }
4455
4456 tmp = XEXP (orig_dst, 0);
4457 if (GET_CODE (tmp) == REG)
4458 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4459 else if (GET_CODE (tmp) == PLUS
4460 && GET_CODE (XEXP (tmp, 0)) == REG
4461 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4462 {
4463 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4464 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4465
4466 if (a > dst_align)
4467 {
4468 if (a >= 64 && c % 8 == 0)
4469 dst_align = 64;
4470 else if (a >= 32 && c % 4 == 0)
4471 dst_align = 32;
4472 else if (a >= 16 && c % 2 == 0)
4473 dst_align = 16;
4474 }
4475 }
4476
4477 /* Load the entire block into registers. */
4478 if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
4479 {
4480 enum machine_mode mode;
4481
4482 tmp = XEXP (XEXP (orig_src, 0), 0);
4483
4484 /* Don't use the existing register if we're reading more than
4485 is held in the register, or if there is no mode that
4486 handles the exact size. */
4487 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
4488 if (GET_CODE (tmp) == REG
4489 && mode != BLKmode
4490 && GET_MODE_SIZE (GET_MODE (tmp)) >= bytes)
4491 {
4492 if (mode == TImode)
4493 {
4494 data_regs[nregs] = gen_lowpart (DImode, tmp);
4495 data_regs[nregs + 1] = gen_highpart (DImode, tmp);
4496 nregs += 2;
4497 }
4498 else
4499 data_regs[nregs++] = gen_lowpart (mode, tmp);
4500
4501 goto src_done;
4502 }
4503
4504 /* No appropriate mode; fall back on memory. */
4505 orig_src = replace_equiv_address (orig_src,
4506 copy_addr_to_reg (XEXP (orig_src, 0)));
4507 src_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4508 }
4509
4510 ofs = 0;
4511 if (src_align >= 64 && bytes >= 8)
4512 {
4513 words = bytes / 8;
4514
4515 for (i = 0; i < words; ++i)
4516 data_regs[nregs + i] = gen_reg_rtx (DImode);
4517
4518 for (i = 0; i < words; ++i)
4519 emit_move_insn (data_regs[nregs + i],
4520 adjust_address (orig_src, DImode, ofs + i * 8));
4521
4522 nregs += words;
4523 bytes -= words * 8;
4524 ofs += words * 8;
4525 }
4526
4527 if (src_align >= 32 && bytes >= 4)
4528 {
4529 words = bytes / 4;
4530
4531 for (i = 0; i < words; ++i)
4532 data_regs[nregs + i] = gen_reg_rtx (SImode);
4533
4534 for (i = 0; i < words; ++i)
4535 emit_move_insn (data_regs[nregs + i],
4536 adjust_address (orig_src, SImode, ofs + i * 4));
4537
4538 nregs += words;
4539 bytes -= words * 4;
4540 ofs += words * 4;
4541 }
4542
4543 if (bytes >= 8)
4544 {
4545 words = bytes / 8;
4546
4547 for (i = 0; i < words+1; ++i)
4548 data_regs[nregs + i] = gen_reg_rtx (DImode);
4549
4550 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
4551 words, ofs);
4552
4553 nregs += words;
4554 bytes -= words * 8;
4555 ofs += words * 8;
4556 }
4557
4558 if (! TARGET_BWX && bytes >= 4)
4559 {
4560 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
4561 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
4562 bytes -= 4;
4563 ofs += 4;
4564 }
4565
4566 if (bytes >= 2)
4567 {
4568 if (src_align >= 16)
4569 {
4570 do {
4571 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4572 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
4573 bytes -= 2;
4574 ofs += 2;
4575 } while (bytes >= 2);
4576 }
4577 else if (! TARGET_BWX)
4578 {
4579 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
4580 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
4581 bytes -= 2;
4582 ofs += 2;
4583 }
4584 }
4585
4586 while (bytes > 0)
4587 {
4588 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
4589 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
4590 bytes -= 1;
4591 ofs += 1;
4592 }
4593
4594 src_done:
4595
4596 if (nregs > ARRAY_SIZE (data_regs))
4597 abort ();
4598
4599 /* Now save it back out again. */
4600
4601 i = 0, ofs = 0;
4602
4603 if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
4604 {
4605 enum machine_mode mode;
4606 tmp = XEXP (XEXP (orig_dst, 0), 0);
4607
4608 mode = mode_for_size (orig_bytes * BITS_PER_UNIT, MODE_INT, 1);
4609 if (GET_CODE (tmp) == REG && GET_MODE (tmp) == mode)
4610 {
4611 if (nregs == 1)
4612 {
4613 emit_move_insn (tmp, data_regs[0]);
4614 i = 1;
4615 goto dst_done;
4616 }
4617
4618 else if (nregs == 2 && mode == TImode)
4619 {
4620 /* Undo the subregging done above when copying between
4621 two TImode registers. */
4622 if (GET_CODE (data_regs[0]) == SUBREG
4623 && GET_MODE (SUBREG_REG (data_regs[0])) == TImode)
4624 emit_move_insn (tmp, SUBREG_REG (data_regs[0]));
4625 else
4626 {
4627 rtx seq;
4628
4629 start_sequence ();
4630 emit_move_insn (gen_lowpart (DImode, tmp), data_regs[0]);
4631 emit_move_insn (gen_highpart (DImode, tmp), data_regs[1]);
4632 seq = get_insns ();
4633 end_sequence ();
4634
4635 emit_no_conflict_block (seq, tmp, data_regs[0],
4636 data_regs[1], NULL_RTX);
4637 }
4638
4639 i = 2;
4640 goto dst_done;
4641 }
4642 }
4643
4644 /* ??? If nregs > 1, consider reconstructing the word in regs. */
4645 /* ??? Optimize mode < dst_mode with strict_low_part. */
4646
4647 /* No appropriate mode; fall back on memory. We can speed things
4648 up by recognizing extra alignment information. */
4649 orig_dst = replace_equiv_address (orig_dst,
4650 copy_addr_to_reg (XEXP (orig_dst, 0)));
4651 dst_align = GET_MODE_BITSIZE (GET_MODE (tmp));
4652 }
4653
4654 /* Write out the data in whatever chunks reading the source allowed. */
4655 if (dst_align >= 64)
4656 {
4657 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4658 {
4659 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4660 data_regs[i]);
4661 ofs += 8;
4662 i++;
4663 }
4664 }
4665
4666 if (dst_align >= 32)
4667 {
4668 /* If the source has remaining DImode regs, write them out in
4669 two pieces. */
4670 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4671 {
4672 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4673 NULL_RTX, 1, OPTAB_WIDEN);
4674
4675 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4676 gen_lowpart (SImode, data_regs[i]));
4677 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4678 gen_lowpart (SImode, tmp));
4679 ofs += 8;
4680 i++;
4681 }
4682
4683 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4684 {
4685 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4686 data_regs[i]);
4687 ofs += 4;
4688 i++;
4689 }
4690 }
4691
4692 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4693 {
4694 /* Write out a remaining block of words using unaligned methods. */
4695
4696 for (words = 1; i + words < nregs; words++)
4697 if (GET_MODE (data_regs[i + words]) != DImode)
4698 break;
4699
4700 if (words == 1)
4701 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4702 else
4703 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4704 words, ofs);
4705
4706 i += words;
4707 ofs += words * 8;
4708 }
4709
4710 /* Due to the above, this won't be aligned. */
4711 /* ??? If we have more than one of these, consider constructing full
4712 words in registers and using alpha_expand_unaligned_store_words. */
4713 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4714 {
4715 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4716 ofs += 4;
4717 i++;
4718 }
4719
4720 if (dst_align >= 16)
4721 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4722 {
4723 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4724 i++;
4725 ofs += 2;
4726 }
4727 else
4728 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4729 {
4730 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4731 i++;
4732 ofs += 2;
4733 }
4734
4735 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
4736 {
4737 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4738 i++;
4739 ofs += 1;
4740 }
4741
4742 dst_done:
4743
4744 if (i != nregs)
4745 abort ();
4746
4747 return 1;
4748 }
4749
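/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[2] is the alignment.

   As with the block move expander above, returns 1 if handled inline
   and 0 to fall back on a library call.  */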
4750 int
4751 alpha_expand_block_clear (rtx operands[])
4752 {
4753 rtx bytes_rtx = operands[1];
4754 rtx align_rtx = operands[2];
4755 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4756 HOST_WIDE_INT bytes = orig_bytes;
4757 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4758 HOST_WIDE_INT alignofs = 0;
4759 rtx orig_dst = operands[0];
4760 rtx tmp;
4761 int i, words, ofs = 0;
4762
4763 if (orig_bytes <= 0)
4764 return 1;
4765 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4766 return 0;
4767
4768 /* Look for stricter alignment. */
4769 tmp = XEXP (orig_dst, 0);
4770 if (GET_CODE (tmp) == REG)
4771 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4772 else if (GET_CODE (tmp) == PLUS
4773 && GET_CODE (XEXP (tmp, 0)) == REG
4774 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4775 {
4776 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4777 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4778
4779 if (a > align)
4780 {
4781 if (a >= 64)
4782 align = a, alignofs = 8 - c % 8;
4783 else if (a >= 32)
4784 align = a, alignofs = 4 - c % 4;
4785 else if (a >= 16)
4786 align = a, alignofs = 2 - c % 2;
4787 }
4788 }
4789 else if (GET_CODE (tmp) == ADDRESSOF)
4790 {
4791 enum machine_mode mode;
4792
4793 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 1);
4794 if (GET_MODE (XEXP (tmp, 0)) == mode)
4795 {
4796 emit_move_insn (XEXP (tmp, 0), const0_rtx);
4797 return 1;
4798 }
4799
4800 /* No appropriate mode; fall back on memory. */
4801 orig_dst = replace_equiv_address (orig_dst, copy_addr_to_reg (tmp));
4802 align = GET_MODE_BITSIZE (GET_MODE (XEXP (tmp, 0)));
4803 }
4804
4805 /* Handle an unaligned prefix first. */
4806
4807 if (alignofs > 0)
4808 {
4809 #if HOST_BITS_PER_WIDE_INT >= 64
4810 /* Given that alignofs is bounded by align, the only time BWX could
4811 generate three stores is for a 7 byte fill. Prefer two individual
4812 stores over a load/mask/store sequence. */
4813 if ((!TARGET_BWX || alignofs == 7)
4814 && align >= 32
4815 && !(alignofs == 4 && bytes >= 4))
4816 {
4817 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4818 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4819 rtx mem, tmp;
4820 HOST_WIDE_INT mask;
4821
4822 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4823 set_mem_alias_set (mem, 0);
4824
4825 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4826 if (bytes < alignofs)
4827 {
4828 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4829 ofs += bytes;
4830 bytes = 0;
4831 }
4832 else
4833 {
4834 bytes -= alignofs;
4835 ofs += alignofs;
4836 }
4837 alignofs = 0;
4838
4839 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4840 NULL_RTX, 1, OPTAB_WIDEN);
4841
4842 emit_move_insn (mem, tmp);
4843 }
4844 #endif
4845
4846 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4847 {
4848 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4849 bytes -= 1;
4850 ofs += 1;
4851 alignofs -= 1;
4852 }
4853 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4854 {
4855 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4856 bytes -= 2;
4857 ofs += 2;
4858 alignofs -= 2;
4859 }
4860 if (alignofs == 4 && bytes >= 4)
4861 {
4862 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4863 bytes -= 4;
4864 ofs += 4;
4865 alignofs = 0;
4866 }
4867
4868 /* If we've not used the extra lead alignment information by now,
4869 we won't be able to. Downgrade align to match what's left over. */
4870 if (alignofs > 0)
4871 {
4872 alignofs = alignofs & -alignofs;
4873 align = MIN (align, alignofs * BITS_PER_UNIT);
4874 }
4875 }
4876
4877 /* Handle a block of contiguous long-words. */
4878
4879 if (align >= 64 && bytes >= 8)
4880 {
4881 words = bytes / 8;
4882
4883 for (i = 0; i < words; ++i)
4884 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4885 const0_rtx);
4886
4887 bytes -= words * 8;
4888 ofs += words * 8;
4889 }
4890
4891 /* If the block is large and appropriately aligned, emit a single
4892 store followed by a sequence of stq_u insns. */
4893
4894 if (align >= 32 && bytes > 16)
4895 {
4896 rtx orig_dsta;
4897
4898 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4899 bytes -= 4;
4900 ofs += 4;
4901
4902 orig_dsta = XEXP (orig_dst, 0);
4903 if (GET_CODE (orig_dsta) == LO_SUM)
4904 orig_dsta = force_reg (Pmode, orig_dsta);
4905
4906 words = bytes / 8;
4907 for (i = 0; i < words; ++i)
4908 {
4909 rtx mem
4910 = change_address (orig_dst, DImode,
4911 gen_rtx_AND (DImode,
4912 plus_constant (orig_dsta, ofs + i*8),
4913 GEN_INT (-8)));
4914 set_mem_alias_set (mem, 0);
4915 emit_move_insn (mem, const0_rtx);
4916 }
4917
4918 /* Depending on the alignment, the first stq_u may have overlapped
4919 with the initial stl, which means that the last stq_u didn't
4920 write as much as it would appear. Leave those questionable bytes
4921 unaccounted for. */
4922 bytes -= words * 8 - 4;
4923 ofs += words * 8 - 4;
4924 }
4925
4926 /* Handle a smaller block of aligned words. */
4927
4928 if ((align >= 64 && bytes == 4)
4929 || (align == 32 && bytes >= 4))
4930 {
4931 words = bytes / 4;
4932
4933 for (i = 0; i < words; ++i)
4934 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4935 const0_rtx);
4936
4937 bytes -= words * 4;
4938 ofs += words * 4;
4939 }
4940
4941 /* An unaligned block uses stq_u stores for as many quadwords as possible. */
4942
4943 if (bytes >= 8)
4944 {
4945 words = bytes / 8;
4946
4947 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4948
4949 bytes -= words * 8;
4950 ofs += words * 8;
4951 }
4952
4953 /* Next clean up any trailing pieces. */
4954
4955 #if HOST_BITS_PER_WIDE_INT >= 64
4956 /* Count the number of bits in BYTES for which aligned stores could
4957 be emitted. */
4958 words = 0;
4959 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4960 if (bytes & i)
4961 words += 1;
4962
4963 /* If we have appropriate alignment (and it wouldn't take too many
4964 instructions otherwise), mask out the bytes we need. */
4965 if (TARGET_BWX ? words > 2 : bytes > 0)
4966 {
4967 if (align >= 64)
4968 {
4969 rtx mem, tmp;
4970 HOST_WIDE_INT mask;
4971
4972 mem = adjust_address (orig_dst, DImode, ofs);
4973 set_mem_alias_set (mem, 0);
4974
4975 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4976
4977 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4978 NULL_RTX, 1, OPTAB_WIDEN);
4979
4980 emit_move_insn (mem, tmp);
4981 return 1;
4982 }
4983 else if (align >= 32 && bytes < 4)
4984 {
4985 rtx mem, tmp;
4986 HOST_WIDE_INT mask;
4987
4988 mem = adjust_address (orig_dst, SImode, ofs);
4989 set_mem_alias_set (mem, 0);
4990
4991 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4992
4993 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4994 NULL_RTX, 1, OPTAB_WIDEN);
4995
4996 emit_move_insn (mem, tmp);
4997 return 1;
4998 }
4999 }
5000 #endif
5001
5002 if (!TARGET_BWX && bytes >= 4)
5003 {
5004 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
5005 bytes -= 4;
5006 ofs += 4;
5007 }
5008
5009 if (bytes >= 2)
5010 {
5011 if (align >= 16)
5012 {
5013 do {
5014 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
5015 const0_rtx);
5016 bytes -= 2;
5017 ofs += 2;
5018 } while (bytes >= 2);
5019 }
5020 else if (! TARGET_BWX)
5021 {
5022 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
5023 bytes -= 2;
5024 ofs += 2;
5025 }
5026 }
5027
5028 while (bytes > 0)
5029 {
5030 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
5031 bytes -= 1;
5032 ofs += 1;
5033 }
5034
5035 return 1;
5036 }
5037
5038 /* Returns a mask so that zap(x, value) == x & mask. */
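/* For example, value == 0x0f selects the low four bytes for zapping, so
   the mask returned is 0xffffffff00000000 and
   zap (x, 0x0f) == x & 0xffffffff00000000.  */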
5039
5040 rtx
5041 alpha_expand_zap_mask (HOST_WIDE_INT value)
5042 {
5043 rtx result;
5044 int i;
5045
5046 if (HOST_BITS_PER_WIDE_INT >= 64)
5047 {
5048 HOST_WIDE_INT mask = 0;
5049
5050 for (i = 7; i >= 0; --i)
5051 {
5052 mask <<= 8;
5053 if (!((value >> i) & 1))
5054 mask |= 0xff;
5055 }
5056
5057 result = gen_int_mode (mask, DImode);
5058 }
5059 else if (HOST_BITS_PER_WIDE_INT == 32)
5060 {
5061 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
5062
5063 for (i = 7; i >= 4; --i)
5064 {
5065 mask_hi <<= 8;
5066 if (!((value >> i) & 1))
5067 mask_hi |= 0xff;
5068 }
5069
5070 for (i = 3; i >= 0; --i)
5071 {
5072 mask_lo <<= 8;
5073 if (!((value >> i) & 1))
5074 mask_lo |= 0xff;
5075 }
5076
5077 result = immed_double_const (mask_lo, mask_hi, DImode);
5078 }
5079 else
5080 abort ();
5081
5082 return result;
5083 }
5084
5085 void
5086 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
5087 enum machine_mode mode,
5088 rtx op0, rtx op1, rtx op2)
5089 {
5090 op0 = gen_lowpart (mode, op0);
5091
5092 if (op1 == const0_rtx)
5093 op1 = CONST0_RTX (mode);
5094 else
5095 op1 = gen_lowpart (mode, op1);
5096
5097 if (op2 == const0_rtx)
5098 op2 = CONST0_RTX (mode);
5099 else
5100 op2 = gen_lowpart (mode, op2);
5101
5102 emit_insn ((*gen) (op0, op1, op2));
5103 }
5104 \f
5105 /* Adjust the cost of a scheduling dependency. Return the new cost of
5106 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5107
5108 static int
5109 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5110 {
5111 enum attr_type insn_type, dep_insn_type;
5112
5113 /* If the dependence is an anti-dependence, there is no cost. For an
5114 output dependence, there is sometimes a cost, but it doesn't seem
5115 worth handling those few cases. */
5116 if (REG_NOTE_KIND (link) != 0)
5117 return cost;
5118
5119 /* If we can't recognize the insns, we can't really do anything. */
5120 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
5121 return cost;
5122
5123 insn_type = get_attr_type (insn);
5124 dep_insn_type = get_attr_type (dep_insn);
5125
5126 /* Bring in the user-defined memory latency. */
5127 if (dep_insn_type == TYPE_ILD
5128 || dep_insn_type == TYPE_FLD
5129 || dep_insn_type == TYPE_LDSYM)
5130 cost += alpha_memory_latency-1;
5131
5132 /* Everything else handled in DFA bypasses now. */
5133
5134 return cost;
5135 }
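/* For example, with a memory latency of 3 cycles an insn that depends on
   an integer load (TYPE_ILD) has its dependency cost increased by 2
   above; all other adjustments are handled by the DFA bypasses.  */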
5136
5137 /* The number of instructions that can be issued per cycle. */
5138
5139 static int
5140 alpha_issue_rate (void)
5141 {
5142 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
5143 }
5144
5145 static int
5146 alpha_use_dfa_pipeline_interface (void)
5147 {
5148 return true;
5149 }
5150
5151 /* How many alternative schedules to try. This should be as wide as the
5152 scheduling freedom in the DFA, but no wider. Making this value too
5153 large results in extra work for the scheduler.
5154
5155 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
5156 alternative schedules. For EV5, we can choose between E0/E1 and
5157 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
5158
5159 static int
5160 alpha_multipass_dfa_lookahead (void)
5161 {
5162 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
5163 }
5164 \f
5165 /* Machine-specific function data. */
5166
5167 struct machine_function GTY(())
5168 {
5169 /* For unicosmk. */
5170 /* List of call information words for calls from this function. */
5171 struct rtx_def *first_ciw;
5172 struct rtx_def *last_ciw;
5173 int ciw_count;
5174
5175 /* List of deferred case vectors. */
5176 struct rtx_def *addr_list;
5177
5178 /* For OSF. */
5179 const char *some_ld_name;
5180 };
5181
5182 /* How to allocate a 'struct machine_function'. */
5183
5184 static struct machine_function *
5185 alpha_init_machine_status (void)
5186 {
5187 return ((struct machine_function *)
5188 ggc_alloc_cleared (sizeof (struct machine_function)));
5189 }
5190
5191 /* Functions to save and restore alpha_return_addr_rtx. */
5192
5193 /* Start the ball rolling with RETURN_ADDR_RTX. */
5194
5195 rtx
5196 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5197 {
5198 if (count != 0)
5199 return const0_rtx;
5200
5201 return get_hard_reg_initial_val (Pmode, REG_RA);
5202 }
5203
5204 /* Return or create a pseudo containing the gp value for the current
5205 function. Needed only if TARGET_LD_BUGGY_LDGP. */
5206
5207 rtx
5208 alpha_gp_save_rtx (void)
5209 {
5210 rtx r = get_hard_reg_initial_val (DImode, 29);
5211 if (GET_CODE (r) != MEM)
5212 r = gen_mem_addressof (r, NULL_TREE, /*rescan=*/true);
5213 return r;
5214 }
5215
5216 static int
5217 alpha_ra_ever_killed (void)
5218 {
5219 rtx top;
5220
5221 if (!has_hard_reg_initial_val (Pmode, REG_RA))
5222 return regs_ever_live[REG_RA];
5223
5224 push_topmost_sequence ();
5225 top = get_insns ();
5226 pop_topmost_sequence ();
5227
5228 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
5229 }
5230
5231 \f
5232 /* Return the trap mode suffix applicable to the current
5233 instruction, or NULL. */
5234
5235 static const char *
5236 get_trap_mode_suffix (void)
5237 {
5238 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
5239
5240 switch (s)
5241 {
5242 case TRAP_SUFFIX_NONE:
5243 return NULL;
5244
5245 case TRAP_SUFFIX_SU:
5246 if (alpha_fptm >= ALPHA_FPTM_SU)
5247 return "su";
5248 return NULL;
5249
5250 case TRAP_SUFFIX_SUI:
5251 if (alpha_fptm >= ALPHA_FPTM_SUI)
5252 return "sui";
5253 return NULL;
5254
5255 case TRAP_SUFFIX_V_SV:
5256 switch (alpha_fptm)
5257 {
5258 case ALPHA_FPTM_N:
5259 return NULL;
5260 case ALPHA_FPTM_U:
5261 return "v";
5262 case ALPHA_FPTM_SU:
5263 case ALPHA_FPTM_SUI:
5264 return "sv";
5265 }
5266 break;
5267
5268 case TRAP_SUFFIX_V_SV_SVI:
5269 switch (alpha_fptm)
5270 {
5271 case ALPHA_FPTM_N:
5272 return NULL;
5273 case ALPHA_FPTM_U:
5274 return "v";
5275 case ALPHA_FPTM_SU:
5276 return "sv";
5277 case ALPHA_FPTM_SUI:
5278 return "svi";
5279 }
5280 break;
5281
5282 case TRAP_SUFFIX_U_SU_SUI:
5283 switch (alpha_fptm)
5284 {
5285 case ALPHA_FPTM_N:
5286 return NULL;
5287 case ALPHA_FPTM_U:
5288 return "u";
5289 case ALPHA_FPTM_SU:
5290 return "su";
5291 case ALPHA_FPTM_SUI:
5292 return "sui";
5293 }
5294 break;
5295 }
5296 abort ();
5297 }
5298
5299 /* Return the rounding mode suffix applicable to the current
5300 instruction, or NULL. */
5301
5302 static const char *
5303 get_round_mode_suffix (void)
5304 {
5305 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5306
5307 switch (s)
5308 {
5309 case ROUND_SUFFIX_NONE:
5310 return NULL;
5311 case ROUND_SUFFIX_NORMAL:
5312 switch (alpha_fprm)
5313 {
5314 case ALPHA_FPRM_NORM:
5315 return NULL;
5316 case ALPHA_FPRM_MINF:
5317 return "m";
5318 case ALPHA_FPRM_CHOP:
5319 return "c";
5320 case ALPHA_FPRM_DYN:
5321 return "d";
5322 }
5323 break;
5324
5325 case ROUND_SUFFIX_C:
5326 return "c";
5327 }
5328 abort ();
5329 }
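/* Taken together, the two helpers above drive the '%/' operand code:
   e.g. with -mfp-trap-mode=su and -mfp-rounding-mode=d an addt is
   printed as "addt/sud" (trap suffix first, then rounding suffix).
   Illustrative note; whether the leading slash is printed here depends
   on TARGET_AS_SLASH_BEFORE_SUFFIX.  */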
5330
5331 /* Locate some local-dynamic symbol still in use by this function
5332 so that we can print its name in some movdi_er_tlsldm pattern. */
5333
5334 static int
5335 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5336 {
5337 rtx x = *px;
5338
5339 if (GET_CODE (x) == SYMBOL_REF
5340 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5341 {
5342 cfun->machine->some_ld_name = XSTR (x, 0);
5343 return 1;
5344 }
5345
5346 return 0;
5347 }
5348
5349 static const char *
5350 get_some_local_dynamic_name (void)
5351 {
5352 rtx insn;
5353
5354 if (cfun->machine->some_ld_name)
5355 return cfun->machine->some_ld_name;
5356
5357 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5358 if (INSN_P (insn)
5359 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5360 return cfun->machine->some_ld_name;
5361
5362 abort ();
5363 }
5364
5365 /* Print an operand. Recognize special options, documented below. */
5366
5367 void
5368 print_operand (FILE *file, rtx x, int code)
5369 {
5370 int i;
5371
5372 switch (code)
5373 {
5374 case '~':
5375 /* Print the assembler name of the current function. */
5376 assemble_name (file, alpha_fnname);
5377 break;
5378
5379 case '&':
5380 assemble_name (file, get_some_local_dynamic_name ());
5381 break;
5382
5383 case '/':
5384 {
5385 const char *trap = get_trap_mode_suffix ();
5386 const char *round = get_round_mode_suffix ();
5387
5388 if (trap || round)
5389 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5390 (trap ? trap : ""), (round ? round : ""));
5391 break;
5392 }
5393
5394 case ',':
5395 /* Generates single precision instruction suffix. */
5396 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5397 break;
5398
5399 case '-':
5400 /* Generates double precision instruction suffix. */
5401 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5402 break;
5403
5404 case '+':
5405 /* Generates a nop after a noreturn call at the very end of the
5406 function. */
5407 if (next_real_insn (current_output_insn) == 0)
5408 fprintf (file, "\n\tnop");
5409 break;
5410
5411 case '#':
5412 if (alpha_this_literal_sequence_number == 0)
5413 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5414 fprintf (file, "%d", alpha_this_literal_sequence_number);
5415 break;
5416
5417 case '*':
5418 if (alpha_this_gpdisp_sequence_number == 0)
5419 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5420 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5421 break;
5422
5423 case 'H':
5424 if (GET_CODE (x) == HIGH)
5425 output_addr_const (file, XEXP (x, 0));
5426 else
5427 output_operand_lossage ("invalid %%H value");
5428 break;
5429
5430 case 'J':
5431 {
5432 const char *lituse;
5433
5434 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5435 {
5436 x = XVECEXP (x, 0, 0);
5437 lituse = "lituse_tlsgd";
5438 }
5439 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5440 {
5441 x = XVECEXP (x, 0, 0);
5442 lituse = "lituse_tlsldm";
5443 }
5444 else if (GET_CODE (x) == CONST_INT)
5445 lituse = "lituse_jsr";
5446 else
5447 {
5448 output_operand_lossage ("invalid %%J value");
5449 break;
5450 }
5451
5452 if (x != const0_rtx)
5453 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5454 }
5455 break;
5456
5457 case 'r':
5458 /* If this operand is the constant zero, write it as "$31". */
5459 if (GET_CODE (x) == REG)
5460 fprintf (file, "%s", reg_names[REGNO (x)]);
5461 else if (x == CONST0_RTX (GET_MODE (x)))
5462 fprintf (file, "$31");
5463 else
5464 output_operand_lossage ("invalid %%r value");
5465 break;
5466
5467 case 'R':
5468 /* Similar, but for floating-point. */
5469 if (GET_CODE (x) == REG)
5470 fprintf (file, "%s", reg_names[REGNO (x)]);
5471 else if (x == CONST0_RTX (GET_MODE (x)))
5472 fprintf (file, "$f31");
5473 else
5474 output_operand_lossage ("invalid %%R value");
5475 break;
5476
5477 case 'N':
5478 /* Write the 1's complement of a constant. */
5479 if (GET_CODE (x) != CONST_INT)
5480 output_operand_lossage ("invalid %%N value");
5481
5482 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5483 break;
5484
5485 case 'P':
5486 /* Write 1 << C, for a constant C. */
5487 if (GET_CODE (x) != CONST_INT)
5488 output_operand_lossage ("invalid %%P value");
5489
5490 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5491 break;
5492
5493 case 'h':
5494 /* Write the high-order 16 bits of a constant, sign-extended. */
5495 if (GET_CODE (x) != CONST_INT)
5496 output_operand_lossage ("invalid %%h value");
5497
5498 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5499 break;
5500
5501 case 'L':
5502 /* Write the low-order 16 bits of a constant, sign-extended. */
5503 if (GET_CODE (x) != CONST_INT)
5504 output_operand_lossage ("invalid %%L value");
5505
5506 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5507 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5508 break;
5509
5510 case 'm':
5511 /* Write mask for ZAP insn. */
5512 if (GET_CODE (x) == CONST_DOUBLE)
5513 {
5514 HOST_WIDE_INT mask = 0;
5515 HOST_WIDE_INT value;
5516
5517 value = CONST_DOUBLE_LOW (x);
5518 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5519 i++, value >>= 8)
5520 if (value & 0xff)
5521 mask |= (1 << i);
5522
5523 value = CONST_DOUBLE_HIGH (x);
5524 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5525 i++, value >>= 8)
5526 if (value & 0xff)
5527 mask |= (1 << (i + sizeof (int)));
5528
5529 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5530 }
5531
5532 else if (GET_CODE (x) == CONST_INT)
5533 {
5534 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5535
5536 for (i = 0; i < 8; i++, value >>= 8)
5537 if (value & 0xff)
5538 mask |= (1 << i);
5539
5540 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5541 }
5542 else
5543 output_operand_lossage ("invalid %%m value");
5544 break;
5545
5546 case 'M':
5547 /* Write 'b', 'w', 'l', or 'q' according to the value of the constant. */
5548 if (GET_CODE (x) != CONST_INT
5549 || (INTVAL (x) != 8 && INTVAL (x) != 16
5550 && INTVAL (x) != 32 && INTVAL (x) != 64))
5551 output_operand_lossage ("invalid %%M value");
5552
5553 fprintf (file, "%s",
5554 (INTVAL (x) == 8 ? "b"
5555 : INTVAL (x) == 16 ? "w"
5556 : INTVAL (x) == 32 ? "l"
5557 : "q"));
5558 break;
5559
5560 case 'U':
5561 /* Similar, except do it from the mask. */
5562 if (GET_CODE (x) == CONST_INT)
5563 {
5564 HOST_WIDE_INT value = INTVAL (x);
5565
5566 if (value == 0xff)
5567 {
5568 fputc ('b', file);
5569 break;
5570 }
5571 if (value == 0xffff)
5572 {
5573 fputc ('w', file);
5574 break;
5575 }
5576 if (value == 0xffffffff)
5577 {
5578 fputc ('l', file);
5579 break;
5580 }
5581 if (value == -1)
5582 {
5583 fputc ('q', file);
5584 break;
5585 }
5586 }
5587 else if (HOST_BITS_PER_WIDE_INT == 32
5588 && GET_CODE (x) == CONST_DOUBLE
5589 && CONST_DOUBLE_LOW (x) == 0xffffffff
5590 && CONST_DOUBLE_HIGH (x) == 0)
5591 {
5592 fputc ('l', file);
5593 break;
5594 }
5595 output_operand_lossage ("invalid %%U value");
5596 break;
5597
5598 case 's':
5599 /* Write the constant value divided by 8 for little-endian mode or
5600 (56 - value) / 8 for big-endian mode. */
5601
5602 if (GET_CODE (x) != CONST_INT
5603 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5604 ? 56
5605 : 64)
5606 || (INTVAL (x) & 7) != 0)
5607 output_operand_lossage ("invalid %%s value");
5608
5609 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5610 WORDS_BIG_ENDIAN
5611 ? (56 - INTVAL (x)) / 8
5612 : INTVAL (x) / 8);
5613 break;
5614
5615 case 'S':
5616 /* Same, except compute (64 - c) / 8. */
5617
5618 if (GET_CODE (x) != CONST_INT
5619 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5620 || (INTVAL (x) & 7) != 0)
5621 output_operand_lossage ("invalid %%S value");
5622
5623 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5624 break;
5625
5626 case 't':
5627 {
5628 /* On Unicos/Mk systems: use a DEX expression if the symbol
5629 clashes with a register name. */
5630 int dex = unicosmk_need_dex (x);
5631 if (dex)
5632 fprintf (file, "DEX(%d)", dex);
5633 else
5634 output_addr_const (file, x);
5635 }
5636 break;
5637
5638 case 'C': case 'D': case 'c': case 'd':
5639 /* Write out comparison name. */
5640 {
5641 enum rtx_code c = GET_CODE (x);
5642
5643 if (!COMPARISON_P (x))
5644 output_operand_lossage ("invalid %%C value");
5645
5646 else if (code == 'D')
5647 c = reverse_condition (c);
5648 else if (code == 'c')
5649 c = swap_condition (c);
5650 else if (code == 'd')
5651 c = swap_condition (reverse_condition (c));
5652
5653 if (c == LEU)
5654 fprintf (file, "ule");
5655 else if (c == LTU)
5656 fprintf (file, "ult");
5657 else if (c == UNORDERED)
5658 fprintf (file, "un");
5659 else
5660 fprintf (file, "%s", GET_RTX_NAME (c));
5661 }
5662 break;
5663
5664 case 'E':
5665 /* Write the divide or modulus operator. */
5666 switch (GET_CODE (x))
5667 {
5668 case DIV:
5669 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5670 break;
5671 case UDIV:
5672 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5673 break;
5674 case MOD:
5675 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5676 break;
5677 case UMOD:
5678 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5679 break;
5680 default:
5681 output_operand_lossage ("invalid %%E value");
5682 break;
5683 }
5684 break;
5685
5686 case 'A':
5687 /* Write "_u" for unaligned access. */
5688 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5689 fprintf (file, "_u");
5690 break;
5691
5692 case 0:
5693 if (GET_CODE (x) == REG)
5694 fprintf (file, "%s", reg_names[REGNO (x)]);
5695 else if (GET_CODE (x) == MEM)
5696 output_address (XEXP (x, 0));
5697 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5698 {
5699 switch (XINT (XEXP (x, 0), 1))
5700 {
5701 case UNSPEC_DTPREL:
5702 case UNSPEC_TPREL:
5703 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5704 break;
5705 default:
5706 output_operand_lossage ("unknown relocation unspec");
5707 break;
5708 }
5709 }
5710 else
5711 output_addr_const (file, x);
5712 break;
5713
5714 default:
5715 output_operand_lossage ("invalid %%xn code");
5716 }
5717 }
5718
5719 void
5720 print_operand_address (FILE *file, rtx addr)
5721 {
5722 int basereg = 31;
5723 HOST_WIDE_INT offset = 0;
5724
5725 if (GET_CODE (addr) == AND)
5726 addr = XEXP (addr, 0);
5727
5728 if (GET_CODE (addr) == PLUS
5729 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5730 {
5731 offset = INTVAL (XEXP (addr, 1));
5732 addr = XEXP (addr, 0);
5733 }
5734
5735 if (GET_CODE (addr) == LO_SUM)
5736 {
5737 const char *reloc16, *reloclo;
5738 rtx op1 = XEXP (addr, 1);
5739
5740 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5741 {
5742 op1 = XEXP (op1, 0);
5743 switch (XINT (op1, 1))
5744 {
5745 case UNSPEC_DTPREL:
5746 reloc16 = NULL;
5747 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5748 break;
5749 case UNSPEC_TPREL:
5750 reloc16 = NULL;
5751 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5752 break;
5753 default:
5754 output_operand_lossage ("unknown relocation unspec");
5755 return;
5756 }
5757
5758 output_addr_const (file, XVECEXP (op1, 0, 0));
5759 }
5760 else
5761 {
5762 reloc16 = "gprel";
5763 reloclo = "gprellow";
5764 output_addr_const (file, op1);
5765 }
5766
5767 if (offset)
5768 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5769
5770 addr = XEXP (addr, 0);
5771 if (GET_CODE (addr) == REG)
5772 basereg = REGNO (addr);
5773 else if (GET_CODE (addr) == SUBREG
5774 && GET_CODE (SUBREG_REG (addr)) == REG)
5775 basereg = subreg_regno (addr);
5776 else
5777 abort ();
5778
5779 fprintf (file, "($%d)\t\t!%s", basereg,
5780 (basereg == 29 ? reloc16 : reloclo));
5781 return;
5782 }
5783
5784 if (GET_CODE (addr) == REG)
5785 basereg = REGNO (addr);
5786 else if (GET_CODE (addr) == SUBREG
5787 && GET_CODE (SUBREG_REG (addr)) == REG)
5788 basereg = subreg_regno (addr);
5789 else if (GET_CODE (addr) == CONST_INT)
5790 offset = INTVAL (addr);
5791
5792 #if TARGET_ABI_OPEN_VMS
5793 else if (GET_CODE (addr) == SYMBOL_REF)
5794 {
5795 fprintf (file, "%s", XSTR (addr, 0));
5796 return;
5797 }
5798 else if (GET_CODE (addr) == CONST
5799 && GET_CODE (XEXP (addr, 0)) == PLUS
5800 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
5801 {
5802 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5803 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5804 INTVAL (XEXP (XEXP (addr, 0), 1)));
5805 return;
5806 }
5807 #endif
5808
5809 else
5810 abort ();
5811
5812 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5813 }
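/* Example outputs (illustrative): a (plus (reg $15) (const_int 24))
   address prints as "24($15)", a bare register prints as "0($15)", and
   an AND-style unaligned address is printed via its first operand after
   the AND is stripped above.  */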
5814 \f
5815 /* Emit RTL insns to initialize the variable parts of a trampoline at
5816 TRAMP. FNADDR is an RTX for the address of the function's pure
5817 code. CXT is an RTX for the static chain value for the function.
5818
5819 The three offset parameters are for the individual template's
5820 layout. A JMPOFS < 0 indicates that the trampoline does not
5821 contain instructions at all.
5822
5823 We assume here that a function will be called many more times than
5824 its address is taken (e.g., it might be passed to qsort), so we
5825 take the trouble to initialize the "hint" field in the JMP insn.
5826 Note that the hint field is PC (new) + 4 * bits 13:0. */
5827
5828 void
5829 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5830 int fnofs, int cxtofs, int jmpofs)
5831 {
5832 rtx temp, temp1, addr;
5833 /* VMS really uses DImode pointers in memory at this point. */
5834 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5835
5836 #ifdef POINTERS_EXTEND_UNSIGNED
5837 fnaddr = convert_memory_address (mode, fnaddr);
5838 cxt = convert_memory_address (mode, cxt);
5839 #endif
5840
5841 /* Store function address and CXT. */
5842 addr = memory_address (mode, plus_constant (tramp, fnofs));
5843 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5844 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5845 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5846
5847 /* This has been disabled since the hint only has a 32k range, and in
5848 no existing OS is the stack within 32k of the text segment. */
5849 if (0 && jmpofs >= 0)
5850 {
5851 /* Compute hint value. */
5852 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5853 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5854 OPTAB_WIDEN);
5855 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5856 build_int_2 (2, 0), NULL_RTX, 1);
5857 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5858 GEN_INT (0x3fff), 0);
5859
5860 /* Merge in the hint. */
5861 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5862 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5863 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5864 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5865 OPTAB_WIDEN);
5866 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5867 }
5868
5869 #ifdef TRANSFER_FROM_TRAMPOLINE
5870 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5871 0, VOIDmode, 1, tramp, Pmode);
5872 #endif
5873
5874 if (jmpofs >= 0)
5875 emit_insn (gen_imb ());
5876 }
5877 \f
5878 /* Determine where to put an argument to a function.
5879 Value is zero to push the argument on the stack,
5880 or a hard register in which to store the argument.
5881
5882 MODE is the argument's machine mode.
5883 TYPE is the data type of the argument (as a tree).
5884 This is null for libcalls where that information may
5885 not be available.
5886 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5887 the preceding args and about the function being called.
5888 NAMED is nonzero if this argument is a named parameter
5889 (otherwise it is an extra parameter matching an ellipsis).
5890
5891 On Alpha the first 6 words of args are normally in registers
5892 and the rest are pushed. */
5893
5894 rtx
5895 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5896 int named ATTRIBUTE_UNUSED)
5897 {
5898 int basereg;
5899 int num_args;
5900
5901 /* Don't get confused and pass small structures in FP registers. */
5902 if (type && AGGREGATE_TYPE_P (type))
5903 basereg = 16;
5904 else
5905 {
5906 #ifdef ENABLE_CHECKING
5907 /* With SPLIT_COMPLEX_ARGS, we shouldn't see any raw complex
5908 values here. */
5909 if (COMPLEX_MODE_P (mode))
5910 abort ();
5911 #endif
5912
5913 /* Set up defaults for FP operands passed in FP registers, and
5914 integral operands passed in integer registers. */
5915 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5916 basereg = 32 + 16;
5917 else
5918 basereg = 16;
5919 }
5920
5921 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5922 the three platforms, so we can't avoid conditional compilation. */
5923 #if TARGET_ABI_OPEN_VMS
5924 {
5925 if (mode == VOIDmode)
5926 return alpha_arg_info_reg_val (cum);
5927
5928 num_args = cum.num_args;
5929 if (num_args >= 6 || MUST_PASS_IN_STACK (mode, type))
5930 return NULL_RTX;
5931 }
5932 #elif TARGET_ABI_UNICOSMK
5933 {
5934 int size;
5935
5936 /* If this is the last argument, generate the call info word (CIW). */
5937 /* ??? We don't include the caller's line number in the CIW because
5938 I don't know how to determine it if debug info is turned off. */
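/* Roughly, on a 64-bit host the code below packs the CIW as follows:
   bits 0..2 hold the number of register words used (or 7 when all
   six are argument registers), bits 7 down to 3 hold per-register
   type flags for the first five register words, bits 32..51 hold the
   total number of argument words, and bits 52 and up hold the
   argument count.  This layout is inferred from the code, not from
   T3E documentation.  */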
5939 if (mode == VOIDmode)
5940 {
5941 int i;
5942 HOST_WIDE_INT lo;
5943 HOST_WIDE_INT hi;
5944 rtx ciw;
5945
5946 lo = 0;
5947
5948 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5949 if (cum.reg_args_type[i])
5950 lo |= (1 << (7 - i));
5951
5952 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5953 lo |= 7;
5954 else
5955 lo |= cum.num_reg_words;
5956
5957 #if HOST_BITS_PER_WIDE_INT == 32
5958 hi = (cum.num_args << 20) | cum.num_arg_words;
5959 #else
5960 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5961 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5962 hi = 0;
5963 #endif
5964 ciw = immed_double_const (lo, hi, DImode);
5965
5966 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5967 UNSPEC_UMK_LOAD_CIW);
5968 }
5969
5970 size = ALPHA_ARG_SIZE (mode, type, named);
5971 num_args = cum.num_reg_words;
5972 if (MUST_PASS_IN_STACK (mode, type)
5973 || cum.num_reg_words + size > 6 || cum.force_stack)
5974 return NULL_RTX;
5975 else if (type && TYPE_MODE (type) == BLKmode)
5976 {
5977 rtx reg1, reg2;
5978
5979 reg1 = gen_rtx_REG (DImode, num_args + 16);
5980 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5981
5982 /* The argument fits in two registers. Note that we still need to
5983 reserve a register for empty structures. */
5984 if (size == 0)
5985 return NULL_RTX;
5986 else if (size == 1)
5987 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5988 else
5989 {
5990 reg2 = gen_rtx_REG (DImode, num_args + 17);
5991 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5992 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5993 }
5994 }
5995 }
5996 #elif TARGET_ABI_OSF
5997 {
5998 if (cum >= 6)
5999 return NULL_RTX;
6000 num_args = cum;
6001
6002 /* VOID is passed as a special flag for "last argument". */
6003 if (type == void_type_node)
6004 basereg = 16;
6005 else if (MUST_PASS_IN_STACK (mode, type))
6006 return NULL_RTX;
6007 else if (FUNCTION_ARG_PASS_BY_REFERENCE (cum, mode, type, named))
6008 basereg = 16;
6009 }
6010 #else
6011 #error Unhandled ABI
6012 #endif
6013
6014 return gen_rtx_REG (mode, num_args + basereg);
6015 }
6016
6017 /* Return true if TYPE must be returned in memory, instead of in registers. */
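/* Concretely, per the rules coded below: every aggregate and every
   float vector goes to memory; a complex float such as double
   _Complex stays in registers because only its 8-byte element size
   is tested; anything else wider than UNITS_PER_WORD (8 bytes) goes
   to memory.  */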
6018
6019 static bool
6020 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
6021 {
6022 enum machine_mode mode = VOIDmode;
6023 int size;
6024
6025 if (type)
6026 {
6027 mode = TYPE_MODE (type);
6028
6029 /* All aggregates are returned in memory. */
6030 if (AGGREGATE_TYPE_P (type))
6031 return true;
6032 }
6033
6034 size = GET_MODE_SIZE (mode);
6035 switch (GET_MODE_CLASS (mode))
6036 {
6037 case MODE_VECTOR_FLOAT:
6038 /* Pass all float vectors in memory, like an aggregate. */
6039 return true;
6040
6041 case MODE_COMPLEX_FLOAT:
6042 /* We judge complex floats on the size of their element,
6043 not the size of the whole type. */
6044 size = GET_MODE_UNIT_SIZE (mode);
6045 break;
6046
6047 case MODE_INT:
6048 case MODE_FLOAT:
6049 case MODE_COMPLEX_INT:
6050 case MODE_VECTOR_INT:
6051 break;
6052
6053 default:
6054 /* ??? We get called on all sorts of random stuff from
6055 aggregate_value_p. We can't abort, but it's not clear
6056 what's safe to return. Pretend it's a struct I guess. */
6057 return true;
6058 }
6059
6060 /* Otherwise types must fit in one register. */
6061 return size > UNITS_PER_WORD;
6062 }
6063
6064 /* Define how to find the value returned by a function. VALTYPE is the
6065 data type of the value (as a tree). If the precise function being
6066 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
6067 MODE is set instead of VALTYPE for libcalls.
6068
6069 On Alpha the value is found in $0 for integer functions and
6070 $f0 for floating-point functions. */
6071
6072 rtx
6073 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6074 enum machine_mode mode)
6075 {
6076 unsigned int regnum;
6077 enum mode_class class;
6078
6079 #ifdef ENABLE_CHECKING
6080 if (valtype && alpha_return_in_memory (valtype, func))
6081 abort ();
6082 #endif
6083
6084 if (valtype)
6085 mode = TYPE_MODE (valtype);
6086
6087 class = GET_MODE_CLASS (mode);
6088 switch (class)
6089 {
6090 case MODE_INT:
6091 /* Do the same thing as PROMOTE_MODE. */
6092 mode = DImode;
6093 /* FALLTHRU */
6094
6095 case MODE_COMPLEX_INT:
6096 case MODE_VECTOR_INT:
6097 regnum = 0;
6098 break;
6099
6100 case MODE_FLOAT:
6101 regnum = 32;
6102 break;
6103
6104 case MODE_COMPLEX_FLOAT:
6105 {
6106 enum machine_mode cmode = GET_MODE_INNER (mode);
6107
6108 return gen_rtx_PARALLEL
6109 (VOIDmode,
6110 gen_rtvec (2,
6111 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
6112 const0_rtx),
6113 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
6114 GEN_INT (GET_MODE_SIZE (cmode)))));
6115 }
6116
6117 default:
6118 abort ();
6119 }
6120
6121 return gen_rtx_REG (mode, regnum);
6122 }
6123
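/* Build the va_list type used on OSF: a record holding a pointer
   __base and an integer __offset (plus a dummy padding field), as
   constructed below.  VMS and Unicos/Mk just use a plain pointer.  */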
6124 static tree
6125 alpha_build_builtin_va_list (void)
6126 {
6127 tree base, ofs, space, record, type_decl;
6128
6129 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6130 return ptr_type_node;
6131
6132 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6133 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6134 TREE_CHAIN (record) = type_decl;
6135 TYPE_NAME (record) = type_decl;
6136
6137 /* C++? SET_IS_AGGR_TYPE (record, 1); */
6138
6139 /* Dummy field to prevent alignment warnings. */
6140 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
6141 DECL_FIELD_CONTEXT (space) = record;
6142 DECL_ARTIFICIAL (space) = 1;
6143 DECL_IGNORED_P (space) = 1;
6144
6145 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
6146 integer_type_node);
6147 DECL_FIELD_CONTEXT (ofs) = record;
6148 TREE_CHAIN (ofs) = space;
6149
6150 base = build_decl (FIELD_DECL, get_identifier ("__base"),
6151 ptr_type_node);
6152 DECL_FIELD_CONTEXT (base) = record;
6153 TREE_CHAIN (base) = ofs;
6154
6155 TYPE_FIELDS (record) = base;
6156 layout_type (record);
6157
6158 return record;
6159 }
6160
6161 /* Perform any actions needed for a function that is receiving a
6162 variable number of arguments. */
6163
6164 static void
6165 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
6166 enum machine_mode mode ATTRIBUTE_UNUSED,
6167 tree type ATTRIBUTE_UNUSED,
6168 int *pretend_size, int no_rtl)
6169 {
6170 #if TARGET_ABI_UNICOSMK
6171 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6172 arguments on the stack. Unfortunately, it doesn't always store the first
6173 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6174 with stdargs as we always have at least one named argument there. */
6175 int num_reg_words = pcum->num_reg_words;
6176 if (num_reg_words < 6)
6177 {
6178 if (!no_rtl)
6179 {
6180 emit_insn (gen_umk_mismatch_args (GEN_INT (num_reg_words + 1)));
6181 emit_insn (gen_arg_home_umk ());
6182 }
6183 *pretend_size = 0;
6184 }
6185 #elif TARGET_ABI_OPEN_VMS
6186 /* For VMS, we allocate space for all 6 arg registers plus a count.
6187
6188 However, if NO registers need to be saved, don't allocate any space.
6189 This is not only because we won't need the space, but because AP
6190 includes the current_pretend_args_size and we don't want to mess up
6191 any ap-relative addresses already made. */
6192 if (pcum->num_args < 6)
6193 {
6194 if (!no_rtl)
6195 {
6196 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6197 emit_insn (gen_arg_home ());
6198 }
6199 *pretend_size = 7 * UNITS_PER_WORD;
6200 }
6201 #else
6202 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6203 only push those that are remaining. However, if NO registers need to
6204 be saved, don't allocate any space. This is not only because we won't
6205 need the space, but because AP includes the current_pretend_args_size
6206 and we don't want to mess up any ap-relative addresses already made.
6207
6208 If we are not to use the floating-point registers, save the integer
6209 registers where we would put the floating-point registers. This is
6210 not the most efficient way to implement varargs with just one register
6211 class, but it isn't worth doing anything more efficient in this rare
6212 case. */
6213 CUMULATIVE_ARGS cum = *pcum;
6214
6215 if (cum >= 6)
6216 return;
6217
6218 if (!no_rtl)
6219 {
6220 int set = get_varargs_alias_set ();
6221 rtx tmp;
6222
6223 tmp = gen_rtx_MEM (BLKmode,
6224 plus_constant (virtual_incoming_args_rtx,
6225 (cum + 6) * UNITS_PER_WORD));
6226 set_mem_alias_set (tmp, set);
6227 move_block_from_reg (16 + cum, tmp, 6 - cum);
6228
6229 tmp = gen_rtx_MEM (BLKmode,
6230 plus_constant (virtual_incoming_args_rtx,
6231 cum * UNITS_PER_WORD));
6232 set_mem_alias_set (tmp, set);
6233 move_block_from_reg (16 + (TARGET_FPREGS ? 32 : 0) + cum, tmp,
6234 6 - cum);
6235 }
6236 *pretend_size = 12 * UNITS_PER_WORD;
6237 #endif
6238 }
6239
6240 void
6241 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6242 {
6243 HOST_WIDE_INT offset;
6244 tree t, offset_field, base_field;
6245
6246 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6247 return;
6248
6249 if (TARGET_ABI_UNICOSMK)
6250 std_expand_builtin_va_start (valist, nextarg);
6251
6252 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6253 up by 48, storing fp arg registers in the first 48 bytes, and the
6254 integer arg registers in the next 48 bytes. This is only done,
6255 however, if any integer registers need to be stored.
6256
6257 If no integer registers need be stored, then we must subtract 48
6258 in order to account for the integer arg registers which are counted
6259 in argsize above, but which are not actually stored on the stack.
6260 Must further be careful here about structures straddling the last
6261 integer argument register; that futzes with pretend_args_size,
6262 which changes the meaning of AP. */
6263
6264 if (NUM_ARGS <= 6)
6265 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6266 else
6267 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6268
6269 if (TARGET_ABI_OPEN_VMS)
6270 {
6271 nextarg = plus_constant (nextarg, offset);
6272 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6273 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6274 make_tree (ptr_type_node, nextarg));
6275 TREE_SIDE_EFFECTS (t) = 1;
6276
6277 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6278 }
6279 else
6280 {
6281 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6282 offset_field = TREE_CHAIN (base_field);
6283
6284 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6285 valist, base_field);
6286 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6287 valist, offset_field);
6288
6289 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6290 t = build (PLUS_EXPR, ptr_type_node, t, build_int_2 (offset, 0));
6291 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6292 TREE_SIDE_EFFECTS (t) = 1;
6293 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6294
6295 t = build_int_2 (NUM_ARGS * UNITS_PER_WORD, 0);
6296 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6297 TREE_SIDE_EFFECTS (t) = 1;
6298 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6299 }
6300 }
6301
6302 rtx
6303 alpha_va_arg (tree valist, tree type)
6304 {
6305 rtx addr;
6306 tree t, type_size, rounded_size;
6307 tree offset_field, base_field, addr_tree, addend;
6308 tree wide_type, wide_ofs;
6309 int indirect = 0;
6310
6311 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6312 return std_expand_builtin_va_arg (valist, type);
6313
6314 if (type == error_mark_node
6315 || (type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type))) == NULL
6316 || TREE_OVERFLOW (type_size))
6317 rounded_size = size_zero_node;
6318 else
6319 rounded_size = fold (build (MULT_EXPR, sizetype,
6320 fold (build (TRUNC_DIV_EXPR, sizetype,
6321 fold (build (PLUS_EXPR, sizetype,
6322 type_size,
6323 size_int (7))),
6324 size_int (8))),
6325 size_int (8)));
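/* The expression above just rounds the type size up to a multiple of
   8 bytes: e.g. a 12-byte type gives (12 + 7) / 8 * 8 = 16.  */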
6326
6327 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6328 offset_field = TREE_CHAIN (base_field);
6329
6330 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6331 valist, base_field);
6332 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6333 valist, offset_field);
6334
6335 /* If the type could not be passed in registers, skip the block
6336 reserved for the registers. */
6337 if (MUST_PASS_IN_STACK (TYPE_MODE (type), type))
6338 {
6339 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6340 build (MAX_EXPR, TREE_TYPE (offset_field),
6341 offset_field, build_int_2 (6*8, 0)));
6342 TREE_SIDE_EFFECTS (t) = 1;
6343 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6344 }
6345
6346 wide_type = make_signed_type (64);
6347 wide_ofs = save_expr (build1 (CONVERT_EXPR, wide_type, offset_field));
6348
6349 addend = wide_ofs;
6350
6351 if (TYPE_MODE (type) == TFmode || TYPE_MODE (type) == TCmode)
6352 {
6353 indirect = 1;
6354 rounded_size = size_int (UNITS_PER_WORD);
6355 }
6356 else if (TREE_CODE (type) == COMPLEX_TYPE)
6357 {
6358 rtx real_part, imag_part, value, tmp;
6359
6360 real_part = alpha_va_arg (valist, TREE_TYPE (type));
6361 imag_part = alpha_va_arg (valist, TREE_TYPE (type));
6362
6363 /* ??? Most irritatingly, we're not returning the value here,
6364 but the address. Since real_part and imag_part are not
6365 necessarily contiguous, we must copy to local storage. */
6366
6367 real_part = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (type)), real_part);
6368 imag_part = gen_rtx_MEM (TYPE_MODE (TREE_TYPE (type)), imag_part);
6369 value = gen_rtx_CONCAT (TYPE_MODE (type), real_part, imag_part);
6370
6371 tmp = assign_temp (type, 0, 1, 0);
6372 emit_move_insn (tmp, value);
6373
6374 return XEXP (tmp, 0);
6375 }
6376 else if (TREE_CODE (type) == REAL_TYPE)
6377 {
6378 tree fpaddend, cond;
6379
6380 fpaddend = fold (build (PLUS_EXPR, TREE_TYPE (addend),
6381 addend, build_int_2 (-6*8, 0)));
6382
6383 cond = fold (build (LT_EXPR, integer_type_node,
6384 wide_ofs, build_int_2 (6*8, 0)));
6385
6386 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6387 fpaddend, addend));
6388 }
6389
6390 addr_tree = build (PLUS_EXPR, TREE_TYPE (base_field),
6391 base_field, addend);
6392
6393 addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
6394 addr = copy_to_reg (addr);
6395
6396 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field,
6397 build (PLUS_EXPR, TREE_TYPE (offset_field),
6398 offset_field, rounded_size));
6399 TREE_SIDE_EFFECTS (t) = 1;
6400 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6401
6402 if (indirect)
6403 {
6404 addr = force_reg (Pmode, addr);
6405 addr = gen_rtx_MEM (Pmode, addr);
6406 }
6407
6408 return addr;
6409 }
6410 \f
6411 /* Builtins. */
6412
6413 enum alpha_builtin
6414 {
6415 ALPHA_BUILTIN_CMPBGE,
6416 ALPHA_BUILTIN_EXTBL,
6417 ALPHA_BUILTIN_EXTWL,
6418 ALPHA_BUILTIN_EXTLL,
6419 ALPHA_BUILTIN_EXTQL,
6420 ALPHA_BUILTIN_EXTWH,
6421 ALPHA_BUILTIN_EXTLH,
6422 ALPHA_BUILTIN_EXTQH,
6423 ALPHA_BUILTIN_INSBL,
6424 ALPHA_BUILTIN_INSWL,
6425 ALPHA_BUILTIN_INSLL,
6426 ALPHA_BUILTIN_INSQL,
6427 ALPHA_BUILTIN_INSWH,
6428 ALPHA_BUILTIN_INSLH,
6429 ALPHA_BUILTIN_INSQH,
6430 ALPHA_BUILTIN_MSKBL,
6431 ALPHA_BUILTIN_MSKWL,
6432 ALPHA_BUILTIN_MSKLL,
6433 ALPHA_BUILTIN_MSKQL,
6434 ALPHA_BUILTIN_MSKWH,
6435 ALPHA_BUILTIN_MSKLH,
6436 ALPHA_BUILTIN_MSKQH,
6437 ALPHA_BUILTIN_UMULH,
6438 ALPHA_BUILTIN_ZAP,
6439 ALPHA_BUILTIN_ZAPNOT,
6440 ALPHA_BUILTIN_AMASK,
6441 ALPHA_BUILTIN_IMPLVER,
6442 ALPHA_BUILTIN_RPCC,
6443 ALPHA_BUILTIN_THREAD_POINTER,
6444 ALPHA_BUILTIN_SET_THREAD_POINTER,
6445
6446 /* TARGET_MAX */
6447 ALPHA_BUILTIN_MINUB8,
6448 ALPHA_BUILTIN_MINSB8,
6449 ALPHA_BUILTIN_MINUW4,
6450 ALPHA_BUILTIN_MINSW4,
6451 ALPHA_BUILTIN_MAXUB8,
6452 ALPHA_BUILTIN_MAXSB8,
6453 ALPHA_BUILTIN_MAXUW4,
6454 ALPHA_BUILTIN_MAXSW4,
6455 ALPHA_BUILTIN_PERR,
6456 ALPHA_BUILTIN_PKLB,
6457 ALPHA_BUILTIN_PKWB,
6458 ALPHA_BUILTIN_UNPKBL,
6459 ALPHA_BUILTIN_UNPKBW,
6460
6461 /* TARGET_CIX */
6462 ALPHA_BUILTIN_CTTZ,
6463 ALPHA_BUILTIN_CTLZ,
6464 ALPHA_BUILTIN_CTPOP,
6465
6466 ALPHA_BUILTIN_max
6467 };
6468
6469 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6470 CODE_FOR_builtin_cmpbge,
6471 CODE_FOR_builtin_extbl,
6472 CODE_FOR_builtin_extwl,
6473 CODE_FOR_builtin_extll,
6474 CODE_FOR_builtin_extql,
6475 CODE_FOR_builtin_extwh,
6476 CODE_FOR_builtin_extlh,
6477 CODE_FOR_builtin_extqh,
6478 CODE_FOR_builtin_insbl,
6479 CODE_FOR_builtin_inswl,
6480 CODE_FOR_builtin_insll,
6481 CODE_FOR_builtin_insql,
6482 CODE_FOR_builtin_inswh,
6483 CODE_FOR_builtin_inslh,
6484 CODE_FOR_builtin_insqh,
6485 CODE_FOR_builtin_mskbl,
6486 CODE_FOR_builtin_mskwl,
6487 CODE_FOR_builtin_mskll,
6488 CODE_FOR_builtin_mskql,
6489 CODE_FOR_builtin_mskwh,
6490 CODE_FOR_builtin_msklh,
6491 CODE_FOR_builtin_mskqh,
6492 CODE_FOR_umuldi3_highpart,
6493 CODE_FOR_builtin_zap,
6494 CODE_FOR_builtin_zapnot,
6495 CODE_FOR_builtin_amask,
6496 CODE_FOR_builtin_implver,
6497 CODE_FOR_builtin_rpcc,
6498 CODE_FOR_load_tp,
6499 CODE_FOR_set_tp,
6500
6501 /* TARGET_MAX */
6502 CODE_FOR_builtin_minub8,
6503 CODE_FOR_builtin_minsb8,
6504 CODE_FOR_builtin_minuw4,
6505 CODE_FOR_builtin_minsw4,
6506 CODE_FOR_builtin_maxub8,
6507 CODE_FOR_builtin_maxsb8,
6508 CODE_FOR_builtin_maxuw4,
6509 CODE_FOR_builtin_maxsw4,
6510 CODE_FOR_builtin_perr,
6511 CODE_FOR_builtin_pklb,
6512 CODE_FOR_builtin_pkwb,
6513 CODE_FOR_builtin_unpkbl,
6514 CODE_FOR_builtin_unpkbw,
6515
6516 /* TARGET_CIX */
6517 CODE_FOR_builtin_cttz,
6518 CODE_FOR_builtin_ctlz,
6519 CODE_FOR_builtin_ctpop
6520 };
6521
6522 struct alpha_builtin_def
6523 {
6524 const char *name;
6525 enum alpha_builtin code;
6526 unsigned int target_mask;
6527 };
6528
6529 static struct alpha_builtin_def const zero_arg_builtins[] = {
6530 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0 },
6531 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0 }
6532 };
6533
6534 static struct alpha_builtin_def const one_arg_builtins[] = {
6535 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0 },
6536 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX },
6537 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX },
6538 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX },
6539 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX },
6540 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX },
6541 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX },
6542 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX }
6543 };
6544
6545 static struct alpha_builtin_def const two_arg_builtins[] = {
6546 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0 },
6547 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0 },
6548 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0 },
6549 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0 },
6550 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0 },
6551 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0 },
6552 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0 },
6553 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0 },
6554 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0 },
6555 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0 },
6556 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0 },
6557 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0 },
6558 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0 },
6559 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0 },
6560 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0 },
6561 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0 },
6562 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0 },
6563 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0 },
6564 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0 },
6565 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0 },
6566 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0 },
6567 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0 },
6568 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0 },
6569 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0 },
6570 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0 },
6571 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX },
6572 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX },
6573 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX },
6574 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX },
6575 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX },
6576 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX },
6577 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX },
6578 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX },
6579 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX }
6580 };
6581
6582 static void
6583 alpha_init_builtins (void)
6584 {
6585 const struct alpha_builtin_def *p;
6586 tree ftype;
6587 size_t i;
6588
6589 ftype = build_function_type (long_integer_type_node, void_list_node);
6590
6591 p = zero_arg_builtins;
6592 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6593 if ((target_flags & p->target_mask) == p->target_mask)
6594 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6595 NULL, NULL_TREE);
6596
6597 ftype = build_function_type_list (long_integer_type_node,
6598 long_integer_type_node, NULL_TREE);
6599
6600 p = one_arg_builtins;
6601 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6602 if ((target_flags & p->target_mask) == p->target_mask)
6603 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6604 NULL, NULL_TREE);
6605
6606 ftype = build_function_type_list (long_integer_type_node,
6607 long_integer_type_node,
6608 long_integer_type_node, NULL_TREE);
6609
6610 p = two_arg_builtins;
6611 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6612 if ((target_flags & p->target_mask) == p->target_mask)
6613 builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6614 NULL, NULL_TREE);
6615
6616 ftype = build_function_type (ptr_type_node, void_list_node);
6617 builtin_function ("__builtin_thread_pointer", ftype,
6618 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6619 NULL, NULL_TREE);
6620
6621 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6622 builtin_function ("__builtin_set_thread_pointer", ftype,
6623 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6624 NULL, NULL_TREE);
6625 }
6626
6627 /* Expand an expression EXP that calls a built-in function,
6628 with result going to TARGET if that's convenient
6629 (and in mode MODE if that's convenient).
6630 SUBTARGET may be used as the target for computing one of EXP's operands.
6631 IGNORE is nonzero if the value is to be ignored. */
6632
6633 static rtx
6634 alpha_expand_builtin (tree exp, rtx target,
6635 rtx subtarget ATTRIBUTE_UNUSED,
6636 enum machine_mode mode ATTRIBUTE_UNUSED,
6637 int ignore ATTRIBUTE_UNUSED)
6638 {
6639 #define MAX_ARGS 2
6640
6641 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6642 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6643 tree arglist = TREE_OPERAND (exp, 1);
6644 enum insn_code icode;
6645 rtx op[MAX_ARGS], pat;
6646 int arity;
6647 bool nonvoid;
6648
6649 if (fcode >= ALPHA_BUILTIN_max)
6650 internal_error ("bad builtin fcode");
6651 icode = code_for_builtin[fcode];
6652 if (icode == 0)
6653 internal_error ("bad builtin fcode");
6654
6655 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6656
6657 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6658 arglist;
6659 arglist = TREE_CHAIN (arglist), arity++)
6660 {
6661 const struct insn_operand_data *insn_op;
6662
6663 tree arg = TREE_VALUE (arglist);
6664 if (arg == error_mark_node)
6665 return NULL_RTX;
6666 if (arity > MAX_ARGS)
6667 return NULL_RTX;
6668
6669 insn_op = &insn_data[icode].operand[arity + nonvoid];
6670
6671 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6672
6673 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6674 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6675 }
6676
6677 if (nonvoid)
6678 {
6679 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6680 if (!target
6681 || GET_MODE (target) != tmode
6682 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6683 target = gen_reg_rtx (tmode);
6684 }
6685
6686 switch (arity)
6687 {
6688 case 0:
6689 pat = GEN_FCN (icode) (target);
6690 break;
6691 case 1:
6692 if (nonvoid)
6693 pat = GEN_FCN (icode) (target, op[0]);
6694 else
6695 pat = GEN_FCN (icode) (op[0]);
6696 break;
6697 case 2:
6698 pat = GEN_FCN (icode) (target, op[0], op[1]);
6699 break;
6700 default:
6701 abort ();
6702 }
6703 if (!pat)
6704 return NULL_RTX;
6705 emit_insn (pat);
6706
6707 if (nonvoid)
6708 return target;
6709 else
6710 return const0_rtx;
6711 }
6712 \f
6713 /* This page contains routines that are used to determine what the function
6714 prologue and epilogue code will do and write them out. */
6715
6716 /* Compute the size of the save area in the stack. */
6717
6718 /* These variables are used for communication between the following functions.
6719 They indicate various things about the current function being compiled
6720 that are used to tell what kind of prologue, epilogue and procedure
6721 descriptor to generate. */
6722
6723 /* Nonzero if we need a stack procedure. */
6724 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6725 static enum alpha_procedure_types alpha_procedure_type;
6726
6727 /* Register number (either FP or SP) that is used to unwind the frame. */
6728 static int vms_unwind_regno;
6729
6730 /* Register number used to save FP. We need not have one for RA since
6731 we don't modify it for register procedures. This is only defined
6732 for register frame procedures. */
6733 static int vms_save_fp_regno;
6734
6735 /* Register number used to reference objects off our PV. */
6736 static int vms_base_regno;
6737
6738 /* Compute register masks for saved registers. */
6739
6740 static void
6741 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
6742 {
6743 unsigned long imask = 0;
6744 unsigned long fmask = 0;
6745 unsigned int i;
6746
6747 /* When outputting a thunk, we don't have valid register life info,
6748 but assemble_start_function wants to output .frame and .mask
6749 directives. */
6750 if (current_function_is_thunk)
6751 {
6752 *imaskP = 0;
6753 *fmaskP = 0;
6754 return;
6755 }
6756
6757 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6758 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
6759
6760 /* One for every register we have to save. */
6761 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6762 if (! fixed_regs[i] && ! call_used_regs[i]
6763 && regs_ever_live[i] && i != REG_RA
6764 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
6765 {
6766 if (i < 32)
6767 imask |= (1UL << i);
6768 else
6769 fmask |= (1UL << (i - 32));
6770 }
6771
6772 /* We need to restore these for the handler. */
6773 if (current_function_calls_eh_return)
6774 {
6775 for (i = 0; ; ++i)
6776 {
6777 unsigned regno = EH_RETURN_DATA_REGNO (i);
6778 if (regno == INVALID_REGNUM)
6779 break;
6780 imask |= 1UL << regno;
6781 }
6782
6783 /* Glibc likes to use $31 as an unwind stopper for crt0. To
6784 avoid hackery in unwind-dw2.c, we need to actively store a
6785 zero in the prologue of _Unwind_RaiseException et al. */
6786 imask |= 1UL << 31;
6787 }
6788
6789 /* If any register spilled, then spill the return address also. */
6790 /* ??? This is required by the Digital stack unwind specification
6791 and isn't needed if we're doing Dwarf2 unwinding. */
6792 if (imask || fmask || alpha_ra_ever_killed ())
6793 imask |= (1UL << REG_RA);
6794
6795 *imaskP = imask;
6796 *fmaskP = fmask;
6797 }
6798
6799 int
6800 alpha_sa_size (void)
6801 {
6802 unsigned long mask[2];
6803 int sa_size = 0;
6804 int i, j;
6805
6806 alpha_sa_mask (&mask[0], &mask[1]);
6807
6808 if (TARGET_ABI_UNICOSMK)
6809 {
6810 if (mask[0] || mask[1])
6811 sa_size = 14;
6812 }
6813 else
6814 {
6815 for (j = 0; j < 2; ++j)
6816 for (i = 0; i < 32; ++i)
6817 if ((mask[j] >> i) & 1)
6818 sa_size++;
6819 }
6820
6821 if (TARGET_ABI_UNICOSMK)
6822 {
6823 /* We might not need to generate a frame if we don't make any calls
6824 (including calls to __T3E_MISMATCH if this is a vararg function),
6825 don't have any local variables which require stack slots, don't
6826 use alloca and have not determined that we need a frame for other
6827 reasons. */
6828
6829 alpha_procedure_type
6830 = (sa_size || get_frame_size() != 0
6831 || current_function_outgoing_args_size
6832 || current_function_stdarg || current_function_calls_alloca
6833 || frame_pointer_needed)
6834 ? PT_STACK : PT_REGISTER;
6835
6836 /* Always reserve space for saving callee-saved registers if we
6837 need a frame as required by the calling convention. */
6838 if (alpha_procedure_type == PT_STACK)
6839 sa_size = 14;
6840 }
6841 else if (TARGET_ABI_OPEN_VMS)
6842 {
6843 /* Start by assuming we can use a register procedure if we don't
6844 make any calls (REG_RA not used) or need to save any
6845 registers and a stack procedure if we do. */
6846 if ((mask[0] >> REG_RA) & 1)
6847 alpha_procedure_type = PT_STACK;
6848 else if (get_frame_size() != 0)
6849 alpha_procedure_type = PT_REGISTER;
6850 else
6851 alpha_procedure_type = PT_NULL;
6852
6853 /* Don't reserve space for saving FP & RA yet. Do that later after we've
6854 made the final decision on stack procedure vs register procedure. */
6855 if (alpha_procedure_type == PT_STACK)
6856 sa_size -= 2;
6857
6858 /* Decide whether to refer to objects off our PV via FP or PV.
6859 If we need FP for something else or if we receive a nonlocal
6860 goto (which expects PV to contain the value), we must use PV.
6861 Otherwise, start by assuming we can use FP. */
6862
6863 vms_base_regno
6864 = (frame_pointer_needed
6865 || current_function_has_nonlocal_label
6866 || alpha_procedure_type == PT_STACK
6867 || current_function_outgoing_args_size)
6868 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
6869
6870 /* If we want to copy PV into FP, we need to find some register
6871 in which to save FP. */
6872
6873 vms_save_fp_regno = -1;
6874 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
6875 for (i = 0; i < 32; i++)
6876 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
6877 vms_save_fp_regno = i;
6878
6879 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
6880 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
6881 else if (alpha_procedure_type == PT_NULL)
6882 vms_base_regno = REG_PV;
6883
6884 /* Stack unwinding should be done via FP unless we use it for PV. */
6885 vms_unwind_regno = (vms_base_regno == REG_PV
6886 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
6887
6888 /* If this is a stack procedure, allow space for saving FP and RA. */
6889 if (alpha_procedure_type == PT_STACK)
6890 sa_size += 2;
6891 }
6892 else
6893 {
6894 /* Our size must be even (multiple of 16 bytes). */
6895 if (sa_size & 1)
6896 sa_size++;
6897 }
6898
6899 return sa_size * 8;
6900 }
6901
6902 /* Define the offset between two registers, one to be eliminated,
6903 and the other its replacement, at the start of a routine. */
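/* A sketch of the OSF frame layout implied by the arithmetic below and
   by the prologue code: the (rounded) outgoing argument area sits at
   the stack pointer, the register save area above it, then the local
   variables; the pretend (vararg) save area is at the very top, just
   below the incoming stack pointer, which is where the argument
   pointer ends up.  */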
6904
6905 HOST_WIDE_INT
6906 alpha_initial_elimination_offset (unsigned int from,
6907 unsigned int to ATTRIBUTE_UNUSED)
6908 {
6909 HOST_WIDE_INT ret;
6910
6911 ret = alpha_sa_size ();
6912 ret += ALPHA_ROUND (current_function_outgoing_args_size);
6913
6914 if (from == FRAME_POINTER_REGNUM)
6915 ;
6916 else if (from == ARG_POINTER_REGNUM)
6917 ret += (ALPHA_ROUND (get_frame_size ()
6918 + current_function_pretend_args_size)
6919 - current_function_pretend_args_size);
6920 else
6921 abort ();
6922
6923 return ret;
6924 }
6925
6926 int
6927 alpha_pv_save_size (void)
6928 {
6929 alpha_sa_size ();
6930 return alpha_procedure_type == PT_STACK ? 8 : 0;
6931 }
6932
6933 int
6934 alpha_using_fp (void)
6935 {
6936 alpha_sa_size ();
6937 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
6938 }
6939
6940 #if TARGET_ABI_OPEN_VMS
6941
6942 const struct attribute_spec vms_attribute_table[] =
6943 {
6944 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6945 { "overlaid", 0, 0, true, false, false, NULL },
6946 { "global", 0, 0, true, false, false, NULL },
6947 { "initialize", 0, 0, true, false, false, NULL },
6948 { NULL, 0, 0, false, false, false, NULL }
6949 };
6950
6951 #endif
6952
6953 static int
6954 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
6955 {
6956 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
6957 }
6958
6959 int
6960 alpha_find_lo_sum_using_gp (rtx insn)
6961 {
6962 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
6963 }
6964
6965 static int
6966 alpha_does_function_need_gp (void)
6967 {
6968 rtx insn;
6969
6970 /* The GP being variable is an OSF ABI thing. */
6971 if (! TARGET_ABI_OSF)
6972 return 0;
6973
6974 /* We need the gp to load the address of __mcount. */
6975 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6976 return 1;
6977
6978 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
6979 if (current_function_is_thunk)
6980 return 1;
6981
6982 /* The nonlocal receiver pattern assumes that the gp is valid for
6983 the nested function. Reasonable because it's almost always set
6984 correctly already. For the cases where that's wrong, make sure
6985 the nested function loads its gp on entry. */
6986 if (current_function_has_nonlocal_goto)
6987 return 1;
6988
6989 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
6990 Even if we are a static function, we still need to do this in case
6991 our address is taken and passed to something like qsort. */
6992
6993 push_topmost_sequence ();
6994 insn = get_insns ();
6995 pop_topmost_sequence ();
6996
6997 for (; insn; insn = NEXT_INSN (insn))
6998 if (INSN_P (insn)
6999 && GET_CODE (PATTERN (insn)) != USE
7000 && GET_CODE (PATTERN (insn)) != CLOBBER
7001 && get_attr_usegp (insn))
7002 return 1;
7003
7004 return 0;
7005 }
7006
7007 \f
7008 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7009 sequences. */
7010
7011 static rtx
7012 set_frame_related_p (void)
7013 {
7014 rtx seq = get_insns ();
7015 rtx insn;
7016
7017 end_sequence ();
7018
7019 if (!seq)
7020 return NULL_RTX;
7021
7022 if (INSN_P (seq))
7023 {
7024 insn = seq;
7025 while (insn != NULL_RTX)
7026 {
7027 RTX_FRAME_RELATED_P (insn) = 1;
7028 insn = NEXT_INSN (insn);
7029 }
7030 seq = emit_insn (seq);
7031 }
7032 else
7033 {
7034 seq = emit_insn (seq);
7035 RTX_FRAME_RELATED_P (seq) = 1;
7036 }
7037 return seq;
7038 }
7039
7040 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
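/* Typical use is FRP (emit_move_insn (...)): the insns generated by
   EXP are collected in a sequence, marked RTX_FRAME_RELATED_P, and
   then emitted, so that dwarf2 CFI describes them.  */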
7041
7042 /* Write function prologue. */
7043
7044 /* On vms we have two kinds of functions:
7045
7046 - stack frame (PT_STACK)
7047 these are 'normal' functions with local vars and which
7048 call other functions
7049 - register frame (PT_REGISTER)
7050 keeps all data in registers, needs no stack
7051
7052 We must pass this to the assembler so it can generate the
7053 proper pdsc (procedure descriptor).
7054 This is done with the '.pdesc' command.
7055
7056 On non-VMS targets, we don't really differentiate between the two, as we can
7057 simply allocate stack without saving registers. */
7058
7059 void
7060 alpha_expand_prologue (void)
7061 {
7062 /* Registers to save. */
7063 unsigned long imask = 0;
7064 unsigned long fmask = 0;
7065 /* Stack space needed for pushing registers clobbered by us. */
7066 HOST_WIDE_INT sa_size;
7067 /* Complete stack size needed. */
7068 HOST_WIDE_INT frame_size;
7069 /* Offset from base reg to register save area. */
7070 HOST_WIDE_INT reg_offset;
7071 rtx sa_reg, mem;
7072 int i;
7073
7074 sa_size = alpha_sa_size ();
7075
7076 frame_size = get_frame_size ();
7077 if (TARGET_ABI_OPEN_VMS)
7078 frame_size = ALPHA_ROUND (sa_size
7079 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7080 + frame_size
7081 + current_function_pretend_args_size);
7082 else if (TARGET_ABI_UNICOSMK)
7083 /* We have to allocate space for the DSIB if we generate a frame. */
7084 frame_size = ALPHA_ROUND (sa_size
7085 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7086 + ALPHA_ROUND (frame_size
7087 + current_function_outgoing_args_size);
7088 else
7089 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7090 + sa_size
7091 + ALPHA_ROUND (frame_size
7092 + current_function_pretend_args_size));
7093
7094 if (TARGET_ABI_OPEN_VMS)
7095 reg_offset = 8;
7096 else
7097 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7098
7099 alpha_sa_mask (&imask, &fmask);
7100
7101 /* Emit an insn to reload GP, if needed. */
7102 if (TARGET_ABI_OSF)
7103 {
7104 alpha_function_needs_gp = alpha_does_function_need_gp ();
7105 if (alpha_function_needs_gp)
7106 emit_insn (gen_prologue_ldgp ());
7107 }
7108
7109 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7110 the call to mcount ourselves, rather than having the linker do it
7111 magically in response to -pg. Since _mcount has special linkage,
7112 don't represent the call as a call. */
7113 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7114 emit_insn (gen_prologue_mcount ());
7115
7116 if (TARGET_ABI_UNICOSMK)
7117 unicosmk_gen_dsib (&imask);
7118
7119 /* Adjust the stack by the frame size. If the frame size is > 4096
7120 bytes, we need to be sure we probe somewhere in the first and last
7121 4096 bytes (we can probably get away without the latter test) and
7122 every 8192 bytes in between. If the frame size is > 32768, we
7123 do this in a loop. Otherwise, we generate the explicit probe
7124 instructions.
7125
7126 Note that we are only allowed to adjust sp once in the prologue. */
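/* For example (ignoring the Unicos/Mk +64 adjustment), a 20000-byte
   frame gets probes at sp-4096 and sp-12288 from the loop below,
   followed by a single sp adjustment of -20000; the extra probe at
   -frame_size is emitted only when no registers are being saved and
   more than 4096 bytes would otherwise remain unprobed.  */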
7127
7128 if (frame_size <= 32768)
7129 {
7130 if (frame_size > 4096)
7131 {
7132 int probed = 4096;
7133
7134 do
7135 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7136 ? -probed + 64
7137 : -probed)));
7138 while ((probed += 8192) < frame_size);
7139
7140 /* We only have to do this probe if we aren't saving registers. */
7141 if (sa_size == 0 && probed + 4096 < frame_size)
7142 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7143 }
7144
7145 if (frame_size != 0)
7146 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7147 GEN_INT (TARGET_ABI_UNICOSMK
7148 ? -frame_size + 64
7149 : -frame_size))));
7150 }
7151 else
7152 {
7153 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7154 number of 8192 byte blocks to probe. We then probe each block
7155 in the loop and then set SP to the proper location. If the
7156 amount remaining is > 4096, we have to do one more probe if we
7157 are not saving any registers. */
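/* A worked example of the arithmetic below (again ignoring the
   Unicos/Mk bias): for frame_size = 100000, blocks = 104096/8192 = 12
   and leftover = 104096 - 12*8192 = 5792.  R22 starts at sp+4096, the
   probe loop walks it down one 8192-byte block per iteration, and sp
   finally becomes R22 - 5792, i.e. exactly sp - 100000.  */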
7158
7159 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7160 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7161 rtx ptr = gen_rtx_REG (DImode, 22);
7162 rtx count = gen_rtx_REG (DImode, 23);
7163 rtx seq;
7164
7165 emit_move_insn (count, GEN_INT (blocks));
7166 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7167 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7168
7169 /* Because of the difficulty in emitting a new basic block this
7170 late in the compilation, generate the loop as a single insn. */
7171 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7172
7173 if (leftover > 4096 && sa_size == 0)
7174 {
7175 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7176 MEM_VOLATILE_P (last) = 1;
7177 emit_move_insn (last, const0_rtx);
7178 }
7179
7180 if (TARGET_ABI_WINDOWS_NT)
7181 {
7182 /* For NT stack unwind (done by 'reverse execution'), it's
7183 not OK to take the result of a loop, even though the value
7184 is already in ptr, so we reload it via a single operation
7185 and subtract it from sp.
7186
7187 Yes, that's correct -- we have to reload the whole constant
7188 into a temporary via ldah+lda then subtract from sp. */
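/* E.g. for frame_size = 40000 (0x9c40): lo = ((0x9c40 ^ 0x8000)
   - 0x8000) = -25536 and hi = 40000 - (-25536) = 65536, so the ldah
   materializes 65536 and the lda adds -25536, reconstructing 40000
   exactly.  */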
7189
7190 HOST_WIDE_INT lo, hi;
7191 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7192 hi = frame_size - lo;
7193
7194 emit_move_insn (ptr, GEN_INT (hi));
7195 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7196 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7197 ptr));
7198 }
7199 else
7200 {
7201 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7202 GEN_INT (-leftover)));
7203 }
7204
7205 /* This alternative is special, because the DWARF code cannot
7206 possibly intuit through the loop above. So we invent this
7207 note for it to look at instead. */
7208 RTX_FRAME_RELATED_P (seq) = 1;
7209 REG_NOTES (seq)
7210 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7211 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7212 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7213 GEN_INT (TARGET_ABI_UNICOSMK
7214 ? -frame_size + 64
7215 : -frame_size))),
7216 REG_NOTES (seq));
7217 }
7218
7219 if (!TARGET_ABI_UNICOSMK)
7220 {
7221 /* Cope with very large offsets to the register save area. */
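/* The split below keeps every remaining displacement within the signed
   16-bit range of a load/store offset.  E.g. reg_offset = 0x9000 gives
   low = -0x7000; if the save area fits, sa_reg is biased by 0x10000
   and the saves use offsets starting at -0x7000, otherwise sa_reg is
   biased by the whole 0x9000 and offsets start at 0.  */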
7222 sa_reg = stack_pointer_rtx;
7223 if (reg_offset + sa_size > 0x8000)
7224 {
7225 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7226 HOST_WIDE_INT bias;
7227
7228 if (low + sa_size <= 0x8000)
7229 bias = reg_offset - low, reg_offset = low;
7230 else
7231 bias = reg_offset, reg_offset = 0;
7232
7233 sa_reg = gen_rtx_REG (DImode, 24);
7234 FRP (emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx,
7235 GEN_INT (bias))));
7236 }
7237
7238 /* Save regs in stack order. Beginning with VMS PV. */
7239 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7240 {
7241 mem = gen_rtx_MEM (DImode, stack_pointer_rtx);
7242 set_mem_alias_set (mem, alpha_sr_alias_set);
7243 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_PV)));
7244 }
7245
7246 /* Save register RA next. */
7247 if (imask & (1UL << REG_RA))
7248 {
7249 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7250 set_mem_alias_set (mem, alpha_sr_alias_set);
7251 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
7252 imask &= ~(1UL << REG_RA);
7253 reg_offset += 8;
7254 }
7255
7256 /* Now save any other registers required to be saved. */
7257 for (i = 0; i < 31; i++)
7258 if (imask & (1UL << i))
7259 {
7260 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7261 set_mem_alias_set (mem, alpha_sr_alias_set);
7262 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7263 reg_offset += 8;
7264 }
7265
7266 /* Store a zero if requested for unwinding. */
7267 if (imask & (1UL << 31))
7268 {
7269 rtx insn, t;
7270
7271 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7272 set_mem_alias_set (mem, alpha_sr_alias_set);
7273 insn = emit_move_insn (mem, const0_rtx);
7274
7275 RTX_FRAME_RELATED_P (insn) = 1;
7276 t = gen_rtx_REG (Pmode, 31);
7277 t = gen_rtx_SET (VOIDmode, mem, t);
7278 t = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, t, REG_NOTES (insn));
7279 REG_NOTES (insn) = t;
7280
7281 reg_offset += 8;
7282 }
7283
7284 for (i = 0; i < 31; i++)
7285 if (fmask & (1UL << i))
7286 {
7287 mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
7288 set_mem_alias_set (mem, alpha_sr_alias_set);
7289 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7290 reg_offset += 8;
7291 }
7292 }
7293 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7294 {
7295 /* The standard frame on the T3E includes space for saving registers.
7296 We just have to use it. We don't have to save the return address and
7297 the old frame pointer here - they are saved in the DSIB. */
7298
7299 reg_offset = -56;
7300 for (i = 9; i < 15; i++)
7301 if (imask & (1UL << i))
7302 {
7303 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7304 reg_offset));
7305 set_mem_alias_set (mem, alpha_sr_alias_set);
7306 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, i)));
7307 reg_offset -= 8;
7308 }
7309 for (i = 2; i < 10; i++)
7310 if (fmask & (1UL << i))
7311 {
7312 mem = gen_rtx_MEM (DFmode, plus_constant (hard_frame_pointer_rtx,
7313 reg_offset));
7314 set_mem_alias_set (mem, alpha_sr_alias_set);
7315 FRP (emit_move_insn (mem, gen_rtx_REG (DFmode, i+32)));
7316 reg_offset -= 8;
7317 }
7318 }
7319
7320 if (TARGET_ABI_OPEN_VMS)
7321 {
7322 if (alpha_procedure_type == PT_REGISTER)
7323 /* Register frame procedures save the fp.
7324 ?? Ought to have a dwarf2 save for this. */
7325 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7326 hard_frame_pointer_rtx);
7327
7328 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7329 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7330 gen_rtx_REG (DImode, REG_PV)));
7331
7332 if (alpha_procedure_type != PT_NULL
7333 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7334 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7335
7336 /* If we have to allocate space for outgoing args, do it now. */
7337 if (current_function_outgoing_args_size != 0)
7338 {
7339 rtx seq
7340 = emit_move_insn (stack_pointer_rtx,
7341 plus_constant
7342 (hard_frame_pointer_rtx,
7343 - (ALPHA_ROUND
7344 (current_function_outgoing_args_size))));
7345
7346 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7347 if ! frame_pointer_needed. Setting the bit will change the CFA
7348 computation rule to use sp again, which would be wrong if we had
7349 frame_pointer_needed, as this means sp might move unpredictably
7350 later on.
7351
7352 Also, note that
7353 frame_pointer_needed
7354 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7355 and
7356 current_function_outgoing_args_size != 0
7357 => alpha_procedure_type != PT_NULL,
7358
7359 so when we are not setting the bit here, we are guaranteed to
7360 have emitted an FRP frame pointer update just before. */
7361 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7362 }
7363 }
7364 else if (!TARGET_ABI_UNICOSMK)
7365 {
7366 /* If we need a frame pointer, set it from the stack pointer. */
7367 if (frame_pointer_needed)
7368 {
7369 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7370 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7371 else
7372 /* This must always be the last instruction in the
7373 prologue, thus we emit a special move + clobber. */
7374 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7375 stack_pointer_rtx, sa_reg)));
7376 }
7377 }
7378
7379 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7380 the prologue, for exception handling reasons, we cannot do this for
7381 any insn that might fault. We could prevent this for mems with a
7382 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7383 have to prevent all such scheduling with a blockage.
7384
7385 Linux, on the other hand, never bothered to implement OSF/1's
7386 exception handling, and so doesn't care about such things. Anyone
7387 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7388
7389 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7390 emit_insn (gen_blockage ());
7391 }
7392
7393 /* Output the textual info surrounding the prologue. */
7394
7395 void
7396 alpha_start_function (FILE *file, const char *fnname,
7397 tree decl ATTRIBUTE_UNUSED)
7398 {
7399 unsigned long imask = 0;
7400 unsigned long fmask = 0;
7401 /* Stack space needed for pushing registers clobbered by us. */
7402 HOST_WIDE_INT sa_size;
7403 /* Complete stack size needed. */
7404 unsigned HOST_WIDE_INT frame_size;
7405 /* Offset from base reg to register save area. */
7406 HOST_WIDE_INT reg_offset;
7407 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7408 int i;
7409
7410 /* Don't emit an extern directive for functions defined in the same file. */
7411 if (TARGET_ABI_UNICOSMK)
7412 {
7413 tree name_tree;
7414 name_tree = get_identifier (fnname);
7415 TREE_ASM_WRITTEN (name_tree) = 1;
7416 }
7417
7418 alpha_fnname = fnname;
7419 sa_size = alpha_sa_size ();
7420
7421 frame_size = get_frame_size ();
7422 if (TARGET_ABI_OPEN_VMS)
7423 frame_size = ALPHA_ROUND (sa_size
7424 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7425 + frame_size
7426 + current_function_pretend_args_size);
7427 else if (TARGET_ABI_UNICOSMK)
7428 frame_size = ALPHA_ROUND (sa_size
7429 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7430 + ALPHA_ROUND (frame_size
7431 + current_function_outgoing_args_size);
7432 else
7433 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7434 + sa_size
7435 + ALPHA_ROUND (frame_size
7436 + current_function_pretend_args_size));
7437
7438 if (TARGET_ABI_OPEN_VMS)
7439 reg_offset = 8;
7440 else
7441 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7442
7443 alpha_sa_mask (&imask, &fmask);
7444
7445 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7446 We have to do that before the .ent directive as we cannot switch
7447 files within procedures with native ecoff because line numbers are
7448 linked to procedure descriptors.
7449 Outputting the lineno helps debugging of one line functions as they
7450 would otherwise get no line number at all. Please note that we would
7451 like to put out last_linenum from final.c, but it is not accessible. */
7452
7453 if (write_symbols == SDB_DEBUG)
7454 {
7455 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7456 ASM_OUTPUT_SOURCE_FILENAME (file,
7457 DECL_SOURCE_FILE (current_function_decl));
7458 #endif
7459 #ifdef ASM_OUTPUT_SOURCE_LINE
7460 if (debug_info_level != DINFO_LEVEL_TERSE)
7461 ASM_OUTPUT_SOURCE_LINE (file,
7462 DECL_SOURCE_LINE (current_function_decl), 0);
7463 #endif
7464 }
7465
7466 /* Issue function start and label. */
7467 if (TARGET_ABI_OPEN_VMS
7468 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7469 {
7470 fputs ("\t.ent ", file);
7471 assemble_name (file, fnname);
7472 putc ('\n', file);
7473
7474 /* If the function needs GP, we'll write the "..ng" label there.
7475 Otherwise, do it here. */
7476 if (TARGET_ABI_OSF
7477 && ! alpha_function_needs_gp
7478 && ! current_function_is_thunk)
7479 {
7480 putc ('$', file);
7481 assemble_name (file, fnname);
7482 fputs ("..ng:\n", file);
7483 }
7484 }
7485
7486 strcpy (entry_label, fnname);
7487 if (TARGET_ABI_OPEN_VMS)
7488 strcat (entry_label, "..en");
7489
7490 /* For public functions, the label must be globalized by appending an
7491 additional colon. */
7492 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7493 strcat (entry_label, ":");
7494
7495 ASM_OUTPUT_LABEL (file, entry_label);
7496 inside_function = TRUE;
7497
7498 if (TARGET_ABI_OPEN_VMS)
7499 fprintf (file, "\t.base $%d\n", vms_base_regno);
7500
7501 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7502 && !flag_inhibit_size_directive)
7503 {
7504 /* Set flags in procedure descriptor to request IEEE-conformant
7505 math-library routines. The value we set it to is PDSC_EXC_IEEE
7506 (/usr/include/pdsc.h). */
7507 fputs ("\t.eflag 48\n", file);
7508 }
7509
7510 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7511 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7512 alpha_arg_offset = -frame_size + 48;
7513
7514 /* Describe our frame. If the frame size is too large to fit in an integer,
7515 print it as zero to avoid an assembler error. We won't be
7516 properly describing such a frame, but that's the best we can do. */
7517 if (TARGET_ABI_UNICOSMK)
7518 ;
7519 else if (TARGET_ABI_OPEN_VMS)
7520 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7521 HOST_WIDE_INT_PRINT_DEC "\n",
7522 vms_unwind_regno,
7523 frame_size >= (1UL << 31) ? 0 : frame_size,
7524 reg_offset);
7525 else if (!flag_inhibit_size_directive)
7526 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7527 (frame_pointer_needed
7528 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7529 frame_size >= (1UL << 31) ? 0 : frame_size,
7530 current_function_pretend_args_size);
7531
7532 /* Describe which registers were spilled. */
7533 if (TARGET_ABI_UNICOSMK)
7534 ;
7535 else if (TARGET_ABI_OPEN_VMS)
7536 {
7537 if (imask)
7538 /* ??? Does VMS care if mask contains ra? The old code didn't
7539 set it, so I don't set it here. */
7540 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7541 if (fmask)
7542 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7543 if (alpha_procedure_type == PT_REGISTER)
7544 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7545 }
7546 else if (!flag_inhibit_size_directive)
7547 {
7548 if (imask)
7549 {
7550 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7551 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7552
7553 for (i = 0; i < 32; ++i)
7554 if (imask & (1UL << i))
7555 reg_offset += 8;
7556 }
7557
7558 if (fmask)
7559 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7560 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7561 }
7562
7563 #if TARGET_ABI_OPEN_VMS
7564 /* Ifdef'ed because link_section is only available then. */
7565 readonly_data_section ();
7566 fprintf (file, "\t.align 3\n");
7567 assemble_name (file, fnname); fputs ("..na:\n", file);
7568 fputs ("\t.ascii \"", file);
7569 assemble_name (file, fnname);
7570 fputs ("\\0\"\n", file);
7571 alpha_need_linkage (fnname, 1);
7572 text_section ();
7573 #endif
7574 }
7575
7576 /* Emit the .prologue note at the scheduled end of the prologue. */
7577
7578 static void
7579 alpha_output_function_end_prologue (FILE *file)
7580 {
7581 if (TARGET_ABI_UNICOSMK)
7582 ;
7583 else if (TARGET_ABI_OPEN_VMS)
7584 fputs ("\t.prologue\n", file);
7585 else if (TARGET_ABI_WINDOWS_NT)
7586 fputs ("\t.prologue 0\n", file);
7587 else if (!flag_inhibit_size_directive)
7588 fprintf (file, "\t.prologue %d\n",
7589 alpha_function_needs_gp || current_function_is_thunk);
7590 }
7591
7592 /* Write function epilogue. */
7593
7594 /* ??? At some point we will want to support full unwind, and so will
7595 need to mark the epilogue as well. At the moment, we just confuse
7596 dwarf2out. */
7597 #undef FRP
7598 #define FRP(exp) exp
7599
7600 void
7601 alpha_expand_epilogue (void)
7602 {
7603 /* Registers to save. */
7604 unsigned long imask = 0;
7605 unsigned long fmask = 0;
7606 /* Stack space needed for pushing registers clobbered by us. */
7607 HOST_WIDE_INT sa_size;
7608 /* Complete stack size needed. */
7609 HOST_WIDE_INT frame_size;
7610 /* Offset from base reg to register save area. */
7611 HOST_WIDE_INT reg_offset;
7612 int fp_is_frame_pointer, fp_offset;
7613 rtx sa_reg, sa_reg_exp = NULL;
7614 rtx sp_adj1, sp_adj2, mem;
7615 rtx eh_ofs;
7616 int i;
7617
7618 sa_size = alpha_sa_size ();
7619
7620 frame_size = get_frame_size ();
7621 if (TARGET_ABI_OPEN_VMS)
7622 frame_size = ALPHA_ROUND (sa_size
7623 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7624 + frame_size
7625 + current_function_pretend_args_size);
7626 else if (TARGET_ABI_UNICOSMK)
7627 frame_size = ALPHA_ROUND (sa_size
7628 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7629 + ALPHA_ROUND (frame_size
7630 + current_function_outgoing_args_size);
7631 else
7632 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7633 + sa_size
7634 + ALPHA_ROUND (frame_size
7635 + current_function_pretend_args_size));
7636
7637 if (TARGET_ABI_OPEN_VMS)
7638 {
7639 if (alpha_procedure_type == PT_STACK)
7640 reg_offset = 8;
7641 else
7642 reg_offset = 0;
7643 }
7644 else
7645 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7646
7647 alpha_sa_mask (&imask, &fmask);
7648
7649 fp_is_frame_pointer
7650 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7651 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
7652 fp_offset = 0;
7653 sa_reg = stack_pointer_rtx;
7654
7655 if (current_function_calls_eh_return)
7656 eh_ofs = EH_RETURN_STACKADJ_RTX;
7657 else
7658 eh_ofs = NULL_RTX;
7659
7660 if (!TARGET_ABI_UNICOSMK && sa_size)
7661 {
7662 /* If we have a frame pointer, restore SP from it. */
7663 if ((TARGET_ABI_OPEN_VMS
7664 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7665 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
7666 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
7667
7668 /* Cope with very large offsets to the register save area. */
7669 if (reg_offset + sa_size > 0x8000)
7670 {
7671 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7672 HOST_WIDE_INT bias;
7673
7674 if (low + sa_size <= 0x8000)
7675 bias = reg_offset - low, reg_offset = low;
7676 else
7677 bias = reg_offset, reg_offset = 0;
7678
7679 sa_reg = gen_rtx_REG (DImode, 22);
7680 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
7681
7682 FRP (emit_move_insn (sa_reg, sa_reg_exp));
7683 }
7684
7685 /* Restore registers in order, excepting a true frame pointer. */
7686
7687 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7688 if (! eh_ofs)
7689 set_mem_alias_set (mem, alpha_sr_alias_set);
7690 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7691
7692 reg_offset += 8;
7693 imask &= ~(1UL << REG_RA);
7694
7695 for (i = 0; i < 31; ++i)
7696 if (imask & (1UL << i))
7697 {
7698 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
7699 fp_offset = reg_offset;
7700 else
7701 {
7702 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
7703 set_mem_alias_set (mem, alpha_sr_alias_set);
7704 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7705 }
7706 reg_offset += 8;
7707 }
7708
7709 if (imask & (1UL << 31))
7710 reg_offset += 8;
7711
7712 for (i = 0; i < 31; ++i)
7713 if (fmask & (1UL << i))
7714 {
7715 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
7716 set_mem_alias_set (mem, alpha_sr_alias_set);
7717 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7718 reg_offset += 8;
7719 }
7720 }
7721 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7722 {
7723 /* Restore callee-saved general-purpose registers. */
7724
7725 reg_offset = -56;
7726
7727 for (i = 9; i < 15; i++)
7728 if (imask & (1UL << i))
7729 {
7730 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7731 reg_offset));
7732 set_mem_alias_set (mem, alpha_sr_alias_set);
7733 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7734 reg_offset -= 8;
7735 }
7736
7737 for (i = 2; i < 10; i++)
7738 if (fmask & (1UL << i))
7739 {
7740 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
7741 reg_offset));
7742 set_mem_alias_set (mem, alpha_sr_alias_set);
7743 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7744 reg_offset -= 8;
7745 }
7746
7747 /* Restore the return address from the DSIB. */
7748
7749 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
7750 set_mem_alias_set (mem, alpha_sr_alias_set);
7751 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7752 }
7753
7754 if (frame_size || eh_ofs)
7755 {
7756 sp_adj1 = stack_pointer_rtx;
7757
7758 if (eh_ofs)
7759 {
7760 sp_adj1 = gen_rtx_REG (DImode, 23);
7761 emit_move_insn (sp_adj1,
7762 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
7763 }
7764
7765 /* If the stack size is large, begin computation into a temporary
7766 register so as not to interfere with a potential fp restore,
7767 which must be consecutive with an SP restore. */
7768 if (frame_size < 32768
7769 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
7770 sp_adj2 = GEN_INT (frame_size);
7771 else if (TARGET_ABI_UNICOSMK)
7772 {
7773 sp_adj1 = gen_rtx_REG (DImode, 23);
7774 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
7775 sp_adj2 = const0_rtx;
7776 }
7777 else if (frame_size < 0x40007fffL)
7778 {
7779 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7780
7781 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
7782 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
7783 sp_adj1 = sa_reg;
7784 else
7785 {
7786 sp_adj1 = gen_rtx_REG (DImode, 23);
7787 FRP (emit_move_insn (sp_adj1, sp_adj2));
7788 }
7789 sp_adj2 = GEN_INT (low);
7790 }
7791 else
7792 {
7793 rtx tmp = gen_rtx_REG (DImode, 23);
7794 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3));
7795 if (!sp_adj2)
7796 {
7797 /* We can't drop new things to memory this late, afaik,
7798 so build it up by pieces. */
7799 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
7800 -(frame_size < 0)));
7801 if (!sp_adj2)
7802 abort ();
7803 }
7804 }
7805
7806 /* From now on, things must be in order. So emit blockages. */
7807
7808 /* Restore the frame pointer. */
7809 if (TARGET_ABI_UNICOSMK)
7810 {
7811 emit_insn (gen_blockage ());
7812 mem = gen_rtx_MEM (DImode,
7813 plus_constant (hard_frame_pointer_rtx, -16));
7814 set_mem_alias_set (mem, alpha_sr_alias_set);
7815 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7816 }
7817 else if (fp_is_frame_pointer)
7818 {
7819 emit_insn (gen_blockage ());
7820 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
7821 set_mem_alias_set (mem, alpha_sr_alias_set);
7822 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7823 }
7824 else if (TARGET_ABI_OPEN_VMS)
7825 {
7826 emit_insn (gen_blockage ());
7827 FRP (emit_move_insn (hard_frame_pointer_rtx,
7828 gen_rtx_REG (DImode, vms_save_fp_regno)));
7829 }
7830
7831 /* Restore the stack pointer. */
7832 emit_insn (gen_blockage ());
7833 if (sp_adj2 == const0_rtx)
7834 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
7835 else
7836 FRP (emit_move_insn (stack_pointer_rtx,
7837 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
7838 }
7839 else
7840 {
7841 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
7842 {
7843 emit_insn (gen_blockage ());
7844 FRP (emit_move_insn (hard_frame_pointer_rtx,
7845 gen_rtx_REG (DImode, vms_save_fp_regno)));
7846 }
7847 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
7848 {
7849 /* Decrement the frame pointer if the function does not have a
7850 frame. */
7851
7852 emit_insn (gen_blockage ());
7853 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
7854 hard_frame_pointer_rtx, constm1_rtx)));
7855 }
7856 }
7857 }
7858 \f
7859 /* Output the rest of the textual info surrounding the epilogue. */
7860
7861 void
7862 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
7863 {
7864 /* End the function. */
7865 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
7866 {
7867 fputs ("\t.end ", file);
7868 assemble_name (file, fnname);
7869 putc ('\n', file);
7870 }
7871 inside_function = FALSE;
7872
7873 #if TARGET_ABI_OPEN_VMS
7874 alpha_write_linkage (file, fnname, decl);
7875 #endif
7876
7877 /* Output jump tables and the static subroutine information block. */
7878 if (TARGET_ABI_UNICOSMK)
7879 {
7880 unicosmk_output_ssib (file, fnname);
7881 unicosmk_output_deferred_case_vectors (file);
7882 }
7883 }
7884
7885 #if TARGET_ABI_OSF
7886 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
7887
7888 In order to avoid the hordes of differences between generated code
7889 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
7890 lots of code loading up large constants, generate rtl and emit it
7891 instead of going straight to text.
7892
7893 Not sure why this idea hasn't been explored before... */
7894
7895 static void
7896 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7897 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7898 tree function)
7899 {
7900 HOST_WIDE_INT hi, lo;
7901 rtx this, insn, funexp;
7902
7903 /* We always require a valid GP. */
7904 emit_insn (gen_prologue_ldgp ());
7905 emit_note (NOTE_INSN_PROLOGUE_END);
7906
7907 /* Find the "this" pointer. If the function returns a structure,
7908 the structure return pointer is in $16. */
7909 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7910 this = gen_rtx_REG (Pmode, 17);
7911 else
7912 this = gen_rtx_REG (Pmode, 16);
7913
7914 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
7915 entire constant for the add. */
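/* A worked sketch with a made-up value: DELTA == 0x123456 splits into
   lo == 0x3456 and hi == 0x120000, so hi + lo == DELTA and the two adds
   below can be emitted as an ldah/lda pair.  */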
7916 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
7917 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7918 if (hi + lo == delta)
7919 {
7920 if (hi)
7921 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
7922 if (lo)
7923 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
7924 }
7925 else
7926 {
7927 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
7928 delta, -(delta < 0));
7929 emit_insn (gen_adddi3 (this, this, tmp));
7930 }
7931
7932 /* Add a delta stored in the vtable at VCALL_OFFSET. */
7933 if (vcall_offset)
7934 {
7935 rtx tmp, tmp2;
7936
7937 tmp = gen_rtx_REG (Pmode, 0);
7938 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
7939
7940 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
7941 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7942 if (hi + lo == vcall_offset)
7943 {
7944 if (hi)
7945 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
7946 }
7947 else
7948 {
7949 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
7950 vcall_offset, -(vcall_offset < 0));
7951 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
7952 lo = 0;
7953 }
7954 if (lo)
7955 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
7956 else
7957 tmp2 = tmp;
7958 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
7959
7960 emit_insn (gen_adddi3 (this, this, tmp));
7961 }
7962
7963 /* Generate a tail call to the target function. */
7964 if (! TREE_USED (function))
7965 {
7966 assemble_external (function);
7967 TREE_USED (function) = 1;
7968 }
7969 funexp = XEXP (DECL_RTL (function), 0);
7970 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
7971 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
7972 SIBLING_CALL_P (insn) = 1;
7973
7974 /* Run just enough of rest_of_compilation to get the insns emitted.
7975 There's not really enough bulk here to make other passes such as
7976 instruction scheduling worthwhile. Note that use_thunk calls
7977 assemble_start_function and assemble_end_function. */
7978 insn = get_insns ();
7979 insn_locators_initialize ();
7980 shorten_branches (insn);
7981 final_start_function (insn, file, 1);
7982 final (insn, file, 1, 0);
7983 final_end_function ();
7984 }
7985 #endif /* TARGET_ABI_OSF */
7986 \f
7987 /* Debugging support. */
7988
7989 #include "gstab.h"
7990
7991 /* Count the number of sdb-related labels that are generated (to find block
7992 start and end boundaries). */
7993
7994 int sdb_label_count = 0;
7995
7996 /* Next label # for each statement. */
7997
7998 static int sym_lineno = 0;
7999
8000 /* Count the number of .file directives, so that .loc is up to date. */
8001
8002 static int num_source_filenames = 0;
8003
8004 /* Name of the file containing the current function. */
8005
8006 static const char *current_function_file = "";
8007
8008 /* Offsets to alpha virtual arg/local debugging pointers. */
8009
8010 long alpha_arg_offset;
8011 long alpha_auto_offset;
8012 \f
8013 /* Emit a new filename to a stream. */
8014
8015 void
8016 alpha_output_filename (FILE *stream, const char *name)
8017 {
8018 static int first_time = TRUE;
8019 char ltext_label_name[100];
8020
8021 if (first_time)
8022 {
8023 first_time = FALSE;
8024 ++num_source_filenames;
8025 current_function_file = name;
8026 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8027 output_quoted_string (stream, name);
8028 fprintf (stream, "\n");
8029 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8030 fprintf (stream, "\t#@stabs\n");
8031 }
8032
8033 else if (write_symbols == DBX_DEBUG)
8034 {
8035 ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
8036 fprintf (stream, "%s", ASM_STABS_OP);
8037 output_quoted_string (stream, name);
8038 fprintf (stream, ",%d,0,0,%s\n", N_SOL, &ltext_label_name[1]);
8039 }
8040
8041 else if (name != current_function_file
8042 && strcmp (name, current_function_file) != 0)
8043 {
8044 if (inside_function && ! TARGET_GAS)
8045 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8046 else
8047 {
8048 ++num_source_filenames;
8049 current_function_file = name;
8050 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8051 }
8052
8053 output_quoted_string (stream, name);
8054 fprintf (stream, "\n");
8055 }
8056 }
8057
8058 /* Emit a linenumber to a stream. */
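/* A sketch of the two output forms (assuming the usual definitions of
   ASM_STABN_OP and N_SLINE): for DBX debugging, line 42 produces
   something like

	$LM5:
	.stabn 68,0,42,$LM5

   and otherwise a ".loc <file#> 42" directive is emitted.  */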
8059
8060 void
8061 alpha_output_lineno (FILE *stream, int line)
8062 {
8063 if (write_symbols == DBX_DEBUG)
8064 {
8065 /* mips-tfile doesn't understand .stabd directives. */
8066 ++sym_lineno;
8067 fprintf (stream, "$LM%d:\n%s%d,0,%d,$LM%d\n",
8068 sym_lineno, ASM_STABN_OP, N_SLINE, line, sym_lineno);
8069 }
8070 else
8071 fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
8072 }
8073 \f
8074 /* Structure to show the current status of registers and memory. */
8075
8076 struct shadow_summary
8077 {
8078 struct {
8079 unsigned int i : 31; /* Mask of int regs */
8080 unsigned int fp : 31; /* Mask of fp regs */
8081 unsigned int mem : 1; /* mem == imem | fpmem */
8082 } used, defd;
8083 };
8084
8085 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8086 to the summary structure. SET is nonzero if the insn is setting the
8087 object, otherwise zero. */
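/* For example, summarizing a SET that writes $3 with the sum of $1 and $2
   leaves used.i == ((1 << 1) | (1 << 2)) and defd.i == (1 << 3); floating
   registers 32-62 are tracked the same way in the fp masks.  */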
8088
8089 static void
8090 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8091 {
8092 const char *format_ptr;
8093 int i, j;
8094
8095 if (x == 0)
8096 return;
8097
8098 switch (GET_CODE (x))
8099 {
8100 /* ??? Note that this case would be incorrect if the Alpha had a
8101 ZERO_EXTRACT in SET_DEST. */
8102 case SET:
8103 summarize_insn (SET_SRC (x), sum, 0);
8104 summarize_insn (SET_DEST (x), sum, 1);
8105 break;
8106
8107 case CLOBBER:
8108 summarize_insn (XEXP (x, 0), sum, 1);
8109 break;
8110
8111 case USE:
8112 summarize_insn (XEXP (x, 0), sum, 0);
8113 break;
8114
8115 case ASM_OPERANDS:
8116 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8117 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8118 break;
8119
8120 case PARALLEL:
8121 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8122 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8123 break;
8124
8125 case SUBREG:
8126 summarize_insn (SUBREG_REG (x), sum, 0);
8127 break;
8128
8129 case REG:
8130 {
8131 int regno = REGNO (x);
8132 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8133
8134 if (regno == 31 || regno == 63)
8135 break;
8136
8137 if (set)
8138 {
8139 if (regno < 32)
8140 sum->defd.i |= mask;
8141 else
8142 sum->defd.fp |= mask;
8143 }
8144 else
8145 {
8146 if (regno < 32)
8147 sum->used.i |= mask;
8148 else
8149 sum->used.fp |= mask;
8150 }
8151 }
8152 break;
8153
8154 case MEM:
8155 if (set)
8156 sum->defd.mem = 1;
8157 else
8158 sum->used.mem = 1;
8159
8160 /* Find the regs used in memory address computation: */
8161 summarize_insn (XEXP (x, 0), sum, 0);
8162 break;
8163
8164 case CONST_INT: case CONST_DOUBLE:
8165 case SYMBOL_REF: case LABEL_REF: case CONST:
8166 case SCRATCH: case ASM_INPUT:
8167 break;
8168
8169 /* Handle common unary and binary ops for efficiency. */
8170 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8171 case MOD: case UDIV: case UMOD: case AND: case IOR:
8172 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8173 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8174 case NE: case EQ: case GE: case GT: case LE:
8175 case LT: case GEU: case GTU: case LEU: case LTU:
8176 summarize_insn (XEXP (x, 0), sum, 0);
8177 summarize_insn (XEXP (x, 1), sum, 0);
8178 break;
8179
8180 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8181 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8182 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8183 case SQRT: case FFS:
8184 summarize_insn (XEXP (x, 0), sum, 0);
8185 break;
8186
8187 default:
8188 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8189 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8190 switch (format_ptr[i])
8191 {
8192 case 'e':
8193 summarize_insn (XEXP (x, i), sum, 0);
8194 break;
8195
8196 case 'E':
8197 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8198 summarize_insn (XVECEXP (x, i, j), sum, 0);
8199 break;
8200
8201 case 'i':
8202 break;
8203
8204 default:
8205 abort ();
8206 }
8207 }
8208 }
8209
8210 /* Ensure a sufficient number of `trapb' insns are in the code when
8211 the user requests code with a trap precision of functions or
8212 instructions.
8213
8214 In naive mode, when the user requests a trap-precision of
8215 "instruction", a trapb is needed after every instruction that may
8216 generate a trap. This ensures that the code is resumption safe but
8217 it is also slow.
8218
8219 When optimizations are turned on, we delay issuing a trapb as long
8220 as possible. In this context, a trap shadow is the sequence of
8221 instructions that starts with a (potentially) trap generating
8222 instruction and extends to the next trapb or call_pal instruction
8223 (but GCC never generates call_pal by itself). We can delay (and
8224 therefore sometimes omit) a trapb subject to the following
8225 conditions:
8226
8227 (a) On entry to the trap shadow, if any Alpha register or memory
8228 location contains a value that is used as an operand value by some
8229 instruction in the trap shadow (live on entry), then no instruction
8230 in the trap shadow may modify the register or memory location.
8231
8232 (b) Within the trap shadow, the computation of the base register
8233 for a memory load or store instruction may not involve using the
8234 result of an instruction that might generate an UNPREDICTABLE
8235 result.
8236
8237 (c) Within the trap shadow, no register may be used more than once
8238 as a destination register. (This is to make life easier for the
8239 trap-handler.)
8240
8241 (d) The trap shadow may not include any branch instructions. */
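/* An illustrative sketch (qualifiers and register choices made up): two
   trapping FP operations such as

	addt $f1,$f2,$f3
	mult $f4,$f5,$f6
	trapb

   can share one shadow, since neither modifies a register the other uses
   or defines; if the mult also wrote $f3, rule (c) above would force a
   trapb between the two instructions.  */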
8242
8243 static void
8244 alpha_handle_trap_shadows (void)
8245 {
8246 struct shadow_summary shadow;
8247 int trap_pending, exception_nesting;
8248 rtx i, n;
8249
8250 trap_pending = 0;
8251 exception_nesting = 0;
8252 shadow.used.i = 0;
8253 shadow.used.fp = 0;
8254 shadow.used.mem = 0;
8255 shadow.defd = shadow.used;
8256
8257 for (i = get_insns (); i ; i = NEXT_INSN (i))
8258 {
8259 if (GET_CODE (i) == NOTE)
8260 {
8261 switch (NOTE_LINE_NUMBER (i))
8262 {
8263 case NOTE_INSN_EH_REGION_BEG:
8264 exception_nesting++;
8265 if (trap_pending)
8266 goto close_shadow;
8267 break;
8268
8269 case NOTE_INSN_EH_REGION_END:
8270 exception_nesting--;
8271 if (trap_pending)
8272 goto close_shadow;
8273 break;
8274
8275 case NOTE_INSN_EPILOGUE_BEG:
8276 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8277 goto close_shadow;
8278 break;
8279 }
8280 }
8281 else if (trap_pending)
8282 {
8283 if (alpha_tp == ALPHA_TP_FUNC)
8284 {
8285 if (GET_CODE (i) == JUMP_INSN
8286 && GET_CODE (PATTERN (i)) == RETURN)
8287 goto close_shadow;
8288 }
8289 else if (alpha_tp == ALPHA_TP_INSN)
8290 {
8291 if (optimize > 0)
8292 {
8293 struct shadow_summary sum;
8294
8295 sum.used.i = 0;
8296 sum.used.fp = 0;
8297 sum.used.mem = 0;
8298 sum.defd = sum.used;
8299
8300 switch (GET_CODE (i))
8301 {
8302 case INSN:
8303 /* Annoyingly, get_attr_trap will abort on these. */
8304 if (GET_CODE (PATTERN (i)) == USE
8305 || GET_CODE (PATTERN (i)) == CLOBBER)
8306 break;
8307
8308 summarize_insn (PATTERN (i), &sum, 0);
8309
8310 if ((sum.defd.i & shadow.defd.i)
8311 || (sum.defd.fp & shadow.defd.fp))
8312 {
8313 /* (c) would be violated */
8314 goto close_shadow;
8315 }
8316
8317 /* Combine shadow with summary of current insn: */
8318 shadow.used.i |= sum.used.i;
8319 shadow.used.fp |= sum.used.fp;
8320 shadow.used.mem |= sum.used.mem;
8321 shadow.defd.i |= sum.defd.i;
8322 shadow.defd.fp |= sum.defd.fp;
8323 shadow.defd.mem |= sum.defd.mem;
8324
8325 if ((sum.defd.i & shadow.used.i)
8326 || (sum.defd.fp & shadow.used.fp)
8327 || (sum.defd.mem & shadow.used.mem))
8328 {
8329 /* (a) would be violated (also takes care of (b)) */
8330 if (get_attr_trap (i) == TRAP_YES
8331 && ((sum.defd.i & sum.used.i)
8332 || (sum.defd.fp & sum.used.fp)))
8333 abort ();
8334
8335 goto close_shadow;
8336 }
8337 break;
8338
8339 case JUMP_INSN:
8340 case CALL_INSN:
8341 case CODE_LABEL:
8342 goto close_shadow;
8343
8344 default:
8345 abort ();
8346 }
8347 }
8348 else
8349 {
8350 close_shadow:
8351 n = emit_insn_before (gen_trapb (), i);
8352 PUT_MODE (n, TImode);
8353 PUT_MODE (i, TImode);
8354 trap_pending = 0;
8355 shadow.used.i = 0;
8356 shadow.used.fp = 0;
8357 shadow.used.mem = 0;
8358 shadow.defd = shadow.used;
8359 }
8360 }
8361 }
8362
8363 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8364 && GET_CODE (i) == INSN
8365 && GET_CODE (PATTERN (i)) != USE
8366 && GET_CODE (PATTERN (i)) != CLOBBER
8367 && get_attr_trap (i) == TRAP_YES)
8368 {
8369 if (optimize && !trap_pending)
8370 summarize_insn (PATTERN (i), &shadow, 0);
8371 trap_pending = 1;
8372 }
8373 }
8374 }
8375 \f
8376 /* Alpha can only issue instruction groups simultaneously if they are
8377 suitably aligned. This is very processor-specific. */
8378
8379 enum alphaev4_pipe {
8380 EV4_STOP = 0,
8381 EV4_IB0 = 1,
8382 EV4_IB1 = 2,
8383 EV4_IBX = 4
8384 };
8385
8386 enum alphaev5_pipe {
8387 EV5_STOP = 0,
8388 EV5_NONE = 1,
8389 EV5_E01 = 2,
8390 EV5_E0 = 4,
8391 EV5_E1 = 8,
8392 EV5_FAM = 16,
8393 EV5_FA = 32,
8394 EV5_FM = 64
8395 };
8396
8397 static enum alphaev4_pipe
8398 alphaev4_insn_pipe (rtx insn)
8399 {
8400 if (recog_memoized (insn) < 0)
8401 return EV4_STOP;
8402 if (get_attr_length (insn) != 4)
8403 return EV4_STOP;
8404
8405 switch (get_attr_type (insn))
8406 {
8407 case TYPE_ILD:
8408 case TYPE_FLD:
8409 return EV4_IBX;
8410
8411 case TYPE_LDSYM:
8412 case TYPE_IADD:
8413 case TYPE_ILOG:
8414 case TYPE_ICMOV:
8415 case TYPE_ICMP:
8416 case TYPE_IST:
8417 case TYPE_FST:
8418 case TYPE_SHIFT:
8419 case TYPE_IMUL:
8420 case TYPE_FBR:
8421 return EV4_IB0;
8422
8423 case TYPE_MISC:
8424 case TYPE_IBR:
8425 case TYPE_JSR:
8426 case TYPE_CALLPAL:
8427 case TYPE_FCPYS:
8428 case TYPE_FCMOV:
8429 case TYPE_FADD:
8430 case TYPE_FDIV:
8431 case TYPE_FMUL:
8432 return EV4_IB1;
8433
8434 default:
8435 abort ();
8436 }
8437 }
8438
8439 static enum alphaev5_pipe
8440 alphaev5_insn_pipe (rtx insn)
8441 {
8442 if (recog_memoized (insn) < 0)
8443 return EV5_STOP;
8444 if (get_attr_length (insn) != 4)
8445 return EV5_STOP;
8446
8447 switch (get_attr_type (insn))
8448 {
8449 case TYPE_ILD:
8450 case TYPE_FLD:
8451 case TYPE_LDSYM:
8452 case TYPE_IADD:
8453 case TYPE_ILOG:
8454 case TYPE_ICMOV:
8455 case TYPE_ICMP:
8456 return EV5_E01;
8457
8458 case TYPE_IST:
8459 case TYPE_FST:
8460 case TYPE_SHIFT:
8461 case TYPE_IMUL:
8462 case TYPE_MISC:
8463 case TYPE_MVI:
8464 return EV5_E0;
8465
8466 case TYPE_IBR:
8467 case TYPE_JSR:
8468 case TYPE_CALLPAL:
8469 return EV5_E1;
8470
8471 case TYPE_FCPYS:
8472 return EV5_FAM;
8473
8474 case TYPE_FBR:
8475 case TYPE_FCMOV:
8476 case TYPE_FADD:
8477 case TYPE_FDIV:
8478 return EV5_FA;
8479
8480 case TYPE_FMUL:
8481 return EV5_FM;
8482
8483 default:
8484 abort();
8485 }
8486 }
8487
8488 /* IN_USE is a mask of the slots currently filled within the insn group.
8489 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8490 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8491
8492 LEN is, of course, the length of the group in bytes. */
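/* For example (a sketch of the bookkeeping, not of real scheduling output):
   a load (EV4_IBX) followed by an add (EV4_IB0) fits in one 8-byte group,
   because the load's EV4_IBX bit lets it be treated as occupying EV4_IB1
   once the add claims EV4_IB0; a third such insn then starts a new group.  */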
8493
8494 static rtx
8495 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8496 {
8497 int len, in_use;
8498
8499 len = in_use = 0;
8500
8501 if (! INSN_P (insn)
8502 || GET_CODE (PATTERN (insn)) == CLOBBER
8503 || GET_CODE (PATTERN (insn)) == USE)
8504 goto next_and_done;
8505
8506 while (1)
8507 {
8508 enum alphaev4_pipe pipe;
8509
8510 pipe = alphaev4_insn_pipe (insn);
8511 switch (pipe)
8512 {
8513 case EV4_STOP:
8514 /* Force complex instructions to start new groups. */
8515 if (in_use)
8516 goto done;
8517
8518 /* If this is a completely unrecognized insn, it's an asm.
8519 We don't know how long it is, so record length as -1 to
8520 signal a needed realignment. */
8521 if (recog_memoized (insn) < 0)
8522 len = -1;
8523 else
8524 len = get_attr_length (insn);
8525 goto next_and_done;
8526
8527 case EV4_IBX:
8528 if (in_use & EV4_IB0)
8529 {
8530 if (in_use & EV4_IB1)
8531 goto done;
8532 in_use |= EV4_IB1;
8533 }
8534 else
8535 in_use |= EV4_IB0 | EV4_IBX;
8536 break;
8537
8538 case EV4_IB0:
8539 if (in_use & EV4_IB0)
8540 {
8541 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8542 goto done;
8543 in_use |= EV4_IB1;
8544 }
8545 in_use |= EV4_IB0;
8546 break;
8547
8548 case EV4_IB1:
8549 if (in_use & EV4_IB1)
8550 goto done;
8551 in_use |= EV4_IB1;
8552 break;
8553
8554 default:
8555 abort();
8556 }
8557 len += 4;
8558
8559 /* Haifa doesn't do well scheduling branches. */
8560 if (GET_CODE (insn) == JUMP_INSN)
8561 goto next_and_done;
8562
8563 next:
8564 insn = next_nonnote_insn (insn);
8565
8566 if (!insn || ! INSN_P (insn))
8567 goto done;
8568
8569 /* Let Haifa tell us where it thinks insn group boundaries are. */
8570 if (GET_MODE (insn) == TImode)
8571 goto done;
8572
8573 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8574 goto next;
8575 }
8576
8577 next_and_done:
8578 insn = next_nonnote_insn (insn);
8579
8580 done:
8581 *plen = len;
8582 *pin_use = in_use;
8583 return insn;
8584 }
8585
8586 /* IN_USE is a mask of the slots currently filled within the insn group.
8587 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8588 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8589
8590 LEN is, of course, the length of the group in bytes. */
8591
8592 static rtx
8593 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8594 {
8595 int len, in_use;
8596
8597 len = in_use = 0;
8598
8599 if (! INSN_P (insn)
8600 || GET_CODE (PATTERN (insn)) == CLOBBER
8601 || GET_CODE (PATTERN (insn)) == USE)
8602 goto next_and_done;
8603
8604 while (1)
8605 {
8606 enum alphaev5_pipe pipe;
8607
8608 pipe = alphaev5_insn_pipe (insn);
8609 switch (pipe)
8610 {
8611 case EV5_STOP:
8612 /* Force complex instructions to start new groups. */
8613 if (in_use)
8614 goto done;
8615
8616 /* If this is a completely unrecognized insn, it's an asm.
8617 We don't know how long it is, so record length as -1 to
8618 signal a needed realignment. */
8619 if (recog_memoized (insn) < 0)
8620 len = -1;
8621 else
8622 len = get_attr_length (insn);
8623 goto next_and_done;
8624
8625 /* ??? Most of the places below, we would like to abort, as
8626 it would indicate an error either in Haifa, or in the
8627 scheduling description. Unfortunately, Haifa never
8628 schedules the last instruction of the BB, so we don't
8629 have an accurate TI bit to go off of. */
8630 case EV5_E01:
8631 if (in_use & EV5_E0)
8632 {
8633 if (in_use & EV5_E1)
8634 goto done;
8635 in_use |= EV5_E1;
8636 }
8637 else
8638 in_use |= EV5_E0 | EV5_E01;
8639 break;
8640
8641 case EV5_E0:
8642 if (in_use & EV5_E0)
8643 {
8644 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8645 goto done;
8646 in_use |= EV5_E1;
8647 }
8648 in_use |= EV5_E0;
8649 break;
8650
8651 case EV5_E1:
8652 if (in_use & EV5_E1)
8653 goto done;
8654 in_use |= EV5_E1;
8655 break;
8656
8657 case EV5_FAM:
8658 if (in_use & EV5_FA)
8659 {
8660 if (in_use & EV5_FM)
8661 goto done;
8662 in_use |= EV5_FM;
8663 }
8664 else
8665 in_use |= EV5_FA | EV5_FAM;
8666 break;
8667
8668 case EV5_FA:
8669 if (in_use & EV5_FA)
8670 goto done;
8671 in_use |= EV5_FA;
8672 break;
8673
8674 case EV5_FM:
8675 if (in_use & EV5_FM)
8676 goto done;
8677 in_use |= EV5_FM;
8678 break;
8679
8680 case EV5_NONE:
8681 break;
8682
8683 default:
8684 abort();
8685 }
8686 len += 4;
8687
8688 /* Haifa doesn't do well scheduling branches. */
8689 /* ??? If this is predicted not-taken, slotting continues, except
8690 that no more IBR, FBR, or JSR insns may be slotted. */
8691 if (GET_CODE (insn) == JUMP_INSN)
8692 goto next_and_done;
8693
8694 next:
8695 insn = next_nonnote_insn (insn);
8696
8697 if (!insn || ! INSN_P (insn))
8698 goto done;
8699
8700 /* Let Haifa tell us where it thinks insn group boundaries are. */
8701 if (GET_MODE (insn) == TImode)
8702 goto done;
8703
8704 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8705 goto next;
8706 }
8707
8708 next_and_done:
8709 insn = next_nonnote_insn (insn);
8710
8711 done:
8712 *plen = len;
8713 *pin_use = in_use;
8714 return insn;
8715 }
8716
8717 static rtx
8718 alphaev4_next_nop (int *pin_use)
8719 {
8720 int in_use = *pin_use;
8721 rtx nop;
8722
8723 if (!(in_use & EV4_IB0))
8724 {
8725 in_use |= EV4_IB0;
8726 nop = gen_nop ();
8727 }
8728 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
8729 {
8730 in_use |= EV4_IB1;
8731 nop = gen_nop ();
8732 }
8733 else if (TARGET_FP && !(in_use & EV4_IB1))
8734 {
8735 in_use |= EV4_IB1;
8736 nop = gen_fnop ();
8737 }
8738 else
8739 nop = gen_unop ();
8740
8741 *pin_use = in_use;
8742 return nop;
8743 }
8744
8745 static rtx
8746 alphaev5_next_nop (int *pin_use)
8747 {
8748 int in_use = *pin_use;
8749 rtx nop;
8750
8751 if (!(in_use & EV5_E1))
8752 {
8753 in_use |= EV5_E1;
8754 nop = gen_nop ();
8755 }
8756 else if (TARGET_FP && !(in_use & EV5_FA))
8757 {
8758 in_use |= EV5_FA;
8759 nop = gen_fnop ();
8760 }
8761 else if (TARGET_FP && !(in_use & EV5_FM))
8762 {
8763 in_use |= EV5_FM;
8764 nop = gen_fnop ();
8765 }
8766 else
8767 nop = gen_unop ();
8768
8769 *pin_use = in_use;
8770 return nop;
8771 }
8772
8773 /* The instruction group alignment main loop. */
8774
8775 static void
8776 alpha_align_insns (unsigned int max_align,
8777 rtx (*next_group) (rtx, int *, int *),
8778 rtx (*next_nop) (int *))
8779 {
8780 /* ALIGN is the known alignment for the insn group. */
8781 unsigned int align;
8782 /* OFS is the offset of the current insn in the insn group. */
8783 int ofs;
8784 int prev_in_use, in_use, len;
8785 rtx i, next;
8786
8787 /* Let shorten_branches take care of assigning alignments to code labels. */
8788 shorten_branches (get_insns ());
8789
8790 if (align_functions < 4)
8791 align = 4;
8792 else if ((unsigned int) align_functions < max_align)
8793 align = align_functions;
8794 else
8795 align = max_align;
8796
8797 ofs = prev_in_use = 0;
8798 i = get_insns ();
8799 if (GET_CODE (i) == NOTE)
8800 i = next_nonnote_insn (i);
8801
8802 while (i)
8803 {
8804 next = (*next_group) (i, &in_use, &len);
8805
8806 /* When we see a label, resync alignment etc. */
8807 if (GET_CODE (i) == CODE_LABEL)
8808 {
8809 unsigned int new_align = 1 << label_to_alignment (i);
8810
8811 if (new_align >= align)
8812 {
8813 align = new_align < max_align ? new_align : max_align;
8814 ofs = 0;
8815 }
8816
8817 else if (ofs & (new_align-1))
8818 ofs = (ofs | (new_align-1)) + 1;
8819 if (len != 0)
8820 abort();
8821 }
8822
8823 /* Handle complex instructions specially. */
8824 else if (in_use == 0)
8825 {
8826 /* Asms will have length < 0. This is a signal that we have
8827 lost alignment knowledge. Assume, however, that the asm
8828 will not mis-align instructions. */
8829 if (len < 0)
8830 {
8831 ofs = 0;
8832 align = 4;
8833 len = 0;
8834 }
8835 }
8836
8837 /* If the known alignment is smaller than the recognized insn group,
8838 realign the output. */
8839 else if ((int) align < len)
8840 {
8841 unsigned int new_log_align = len > 8 ? 4 : 3;
8842 rtx prev, where;
8843
8844 where = prev = prev_nonnote_insn (i);
8845 if (!where || GET_CODE (where) != CODE_LABEL)
8846 where = i;
8847
8848 /* Can't realign between a call and its gp reload. */
8849 if (! (TARGET_EXPLICIT_RELOCS
8850 && prev && GET_CODE (prev) == CALL_INSN))
8851 {
8852 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
8853 align = 1 << new_log_align;
8854 ofs = 0;
8855 }
8856 }
8857
8858 /* If the group won't fit in the same INT16 as the previous,
8859 we need to add padding to keep the group together. Rather
8860 than simply leaving the insn filling to the assembler, we
8861 can make use of the knowledge of what sorts of instructions
8862 were issued in the previous group to make sure that all of
8863 the added nops are really free. */
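/* A worked sketch with made-up numbers: with align == 16 and ofs == 12,
   an 8-byte group does not fit, so (16 - 12) / 4 == 1 nop is inserted,
   chosen by *next_nop to fill a slot the previous group left free.  */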
8864 else if (ofs + len > (int) align)
8865 {
8866 int nop_count = (align - ofs) / 4;
8867 rtx where;
8868
8869 /* Insert nops before labels, branches, and calls to truly merge
8870 the execution of the nops with the previous instruction group. */
8871 where = prev_nonnote_insn (i);
8872 if (where)
8873 {
8874 if (GET_CODE (where) == CODE_LABEL)
8875 {
8876 rtx where2 = prev_nonnote_insn (where);
8877 if (where2 && GET_CODE (where2) == JUMP_INSN)
8878 where = where2;
8879 }
8880 else if (GET_CODE (where) == INSN)
8881 where = i;
8882 }
8883 else
8884 where = i;
8885
8886 do
8887 emit_insn_before ((*next_nop)(&prev_in_use), where);
8888 while (--nop_count);
8889 ofs = 0;
8890 }
8891
8892 ofs = (ofs + len) & (align - 1);
8893 prev_in_use = in_use;
8894 i = next;
8895 }
8896 }
8897 \f
8898 /* Machine dependent reorg pass. */
8899
8900 static void
8901 alpha_reorg (void)
8902 {
8903 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
8904 alpha_handle_trap_shadows ();
8905
8906 /* Due to the number of extra trapb insns, don't bother fixing up
8907 alignment when trap precision is instruction. Moreover, we can
8908 only do our job when sched2 is run. */
8909 if (optimize && !optimize_size
8910 && alpha_tp != ALPHA_TP_INSN
8911 && flag_schedule_insns_after_reload)
8912 {
8913 if (alpha_cpu == PROCESSOR_EV4)
8914 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
8915 else if (alpha_cpu == PROCESSOR_EV5)
8916 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
8917 }
8918 }
8919 \f
8920 #if !TARGET_ABI_UNICOSMK
8921
8922 #ifdef HAVE_STAMP_H
8923 #include <stamp.h>
8924 #endif
8925
8926 static void
8927 alpha_file_start (void)
8928 {
8929 #ifdef OBJECT_FORMAT_ELF
8930 /* If emitting dwarf2 debug information, we cannot generate a .file
8931 directive to start the file, as it will conflict with dwarf2out
8932 file numbers. So it's only useful when emitting mdebug output. */
8933 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
8934 #endif
8935
8936 default_file_start ();
8937 #ifdef MS_STAMP
8938 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
8939 #endif
8940
8941 fputs ("\t.set noreorder\n", asm_out_file);
8942 fputs ("\t.set volatile\n", asm_out_file);
8943 if (!TARGET_ABI_OPEN_VMS)
8944 fputs ("\t.set noat\n", asm_out_file);
8945 if (TARGET_EXPLICIT_RELOCS)
8946 fputs ("\t.set nomacro\n", asm_out_file);
8947 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8948 fprintf (asm_out_file,
8949 "\t.arch %s\n",
8950 TARGET_CPU_EV6 ? "ev6"
8951 : (TARGET_CPU_EV5
8952 ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
8953 : "ev4"));
8954 }
8955 #endif
8956
8957 #ifdef OBJECT_FORMAT_ELF
8958
8959 /* Switch to the section to which we should output X. The only thing
8960 special we do here is to honor small data. */
8961
8962 static void
8963 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
8964 unsigned HOST_WIDE_INT align)
8965 {
8966 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
8967 /* ??? Consider using mergeable sdata sections. */
8968 sdata_section ();
8969 else
8970 default_elf_select_rtx_section (mode, x, align);
8971 }
8972
8973 #endif /* OBJECT_FORMAT_ELF */
8974 \f
8975 /* Structure to collect function names for final output in link section. */
8976 /* Note that items marked with GTY can't be ifdef'ed out. */
8977
8978 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
8979 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
8980
8981 struct alpha_links GTY(())
8982 {
8983 int num;
8984 rtx linkage;
8985 enum links_kind lkind;
8986 enum reloc_kind rkind;
8987 };
8988
8989 struct alpha_funcs GTY(())
8990 {
8991 int num;
8992 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8993 links;
8994 };
8995
8996 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8997 splay_tree alpha_links_tree;
8998 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
8999 splay_tree alpha_funcs_tree;
9000
9001 static GTY(()) int alpha_funcs_num;
9002
9003 #if TARGET_ABI_OPEN_VMS
9004
9005 /* Return the VMS argument type corresponding to MODE. */
9006
9007 enum avms_arg_type
9008 alpha_arg_type (enum machine_mode mode)
9009 {
9010 switch (mode)
9011 {
9012 case SFmode:
9013 return TARGET_FLOAT_VAX ? FF : FS;
9014 case DFmode:
9015 return TARGET_FLOAT_VAX ? FD : FT;
9016 default:
9017 return I64;
9018 }
9019 }
9020
9021 /* Return an rtx for an integer representing the VMS Argument Information
9022 register value. */
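/* A sketch of the packing (kept symbolic, since the numeric codes come from
   enum avms_arg_type): a call with cum.num_args == 2 yields
   2 | (atypes[0] << 8) | (atypes[1] << 11) | ..., i.e. the argument count
   in the low bits and one 3-bit type code per argument slot from bit 8.  */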
9023
9024 rtx
9025 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9026 {
9027 unsigned HOST_WIDE_INT regval = cum.num_args;
9028 int i;
9029
9030 for (i = 0; i < 6; i++)
9031 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9032
9033 return GEN_INT (regval);
9034 }
9035 \f
9036 /* Make (or fake) .linkage entry for function call.
9037
9038 IS_LOCAL is 0 if the name is used in a call, 1 if it is used in a definition.
9039
9040 Return a SYMBOL_REF rtx for the linkage. */
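/* For example, a reference to a function named "foo" creates (or reuses) an
   entry keyed by that name and returns a SYMBOL_REF named "$foo..lk".  */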
9041
9042 rtx
9043 alpha_need_linkage (const char *name, int is_local)
9044 {
9045 splay_tree_node node;
9046 struct alpha_links *al;
9047
9048 if (name[0] == '*')
9049 name++;
9050
9051 if (is_local)
9052 {
9053 struct alpha_funcs *cfaf;
9054
9055 if (!alpha_funcs_tree)
9056 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9057 splay_tree_compare_pointers);
9058
9059 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9060
9061 cfaf->links = 0;
9062 cfaf->num = ++alpha_funcs_num;
9063
9064 splay_tree_insert (alpha_funcs_tree,
9065 (splay_tree_key) current_function_decl,
9066 (splay_tree_value) cfaf);
9067 }
9068
9069 if (alpha_links_tree)
9070 {
9071 /* Is this name already defined? */
9072
9073 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9074 if (node)
9075 {
9076 al = (struct alpha_links *) node->value;
9077 if (is_local)
9078 {
9079 /* Defined here but external assumed. */
9080 if (al->lkind == KIND_EXTERN)
9081 al->lkind = KIND_LOCAL;
9082 }
9083 else
9084 {
9085 /* Used here but unused assumed. */
9086 if (al->lkind == KIND_UNUSED)
9087 al->lkind = KIND_LOCAL;
9088 }
9089 return al->linkage;
9090 }
9091 }
9092 else
9093 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9094
9095 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9096 name = ggc_strdup (name);
9097
9098 /* Assume external if no definition. */
9099 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9100
9101 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9102 get_identifier (name);
9103
9104 /* Construct a SYMBOL_REF for us to call. */
9105 {
9106 size_t name_len = strlen (name);
9107 char *linksym = alloca (name_len + 6);
9108 linksym[0] = '$';
9109 memcpy (linksym + 1, name, name_len);
9110 memcpy (linksym + 1 + name_len, "..lk", 5);
9111 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9112 ggc_alloc_string (linksym, name_len + 5));
9113 }
9114
9115 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9116 (splay_tree_value) al);
9117
9118 return al->linkage;
9119 }
9120
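/* Return a linkage symbol for a use of the function named by LINKAGE from
   within CFUNDECL, creating a per-function entry "$<n>..<name>..lk" if one
   does not exist yet. RFLAG selects a code-address entry rather than a
   linkage pair; if LFLAG is set, return a MEM referencing the second
   quadword of the linkage pair instead of the symbol itself.  */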
9121 rtx
9122 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9123 {
9124 splay_tree_node cfunnode;
9125 struct alpha_funcs *cfaf;
9126 struct alpha_links *al;
9127 const char *name = XSTR (linkage, 0);
9128
9129 cfaf = (struct alpha_funcs *) 0;
9130 al = (struct alpha_links *) 0;
9131
9132 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9133 cfaf = (struct alpha_funcs *) cfunnode->value;
9134
9135 if (cfaf->links)
9136 {
9137 splay_tree_node lnode;
9138
9139 /* Is this name already defined? */
9140
9141 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9142 if (lnode)
9143 al = (struct alpha_links *) lnode->value;
9144 }
9145 else
9146 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9147
9148 if (!al)
9149 {
9150 size_t name_len;
9151 size_t buflen;
9152 char buf [512];
9153 char *linksym;
9154 splay_tree_node node = 0;
9155 struct alpha_links *anl;
9156
9157 if (name[0] == '*')
9158 name++;
9159
9160 name_len = strlen (name);
9161
9162 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9163 al->num = cfaf->num;
9164
9165 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9166 if (node)
9167 {
9168 anl = (struct alpha_links *) node->value;
9169 al->lkind = anl->lkind;
9170 }
9171
9172 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9173 buflen = strlen (buf);
9174 linksym = alloca (buflen + 1);
9175 memcpy (linksym, buf, buflen + 1);
9176
9177 al->linkage = gen_rtx_SYMBOL_REF
9178 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9179
9180 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9181 (splay_tree_value) al);
9182 }
9183
9184 if (rflag)
9185 al->rkind = KIND_CODEADDR;
9186 else
9187 al->rkind = KIND_LINKAGE;
9188
9189 if (lflag)
9190 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9191 else
9192 return al->linkage;
9193 }
9194
9195 static int
9196 alpha_write_one_linkage (splay_tree_node node, void *data)
9197 {
9198 const char *const name = (const char *) node->key;
9199 struct alpha_links *link = (struct alpha_links *) node->value;
9200 FILE *stream = (FILE *) data;
9201
9202 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9203 if (link->rkind == KIND_CODEADDR)
9204 {
9205 if (link->lkind == KIND_LOCAL)
9206 {
9207 /* Local and used */
9208 fprintf (stream, "\t.quad %s..en\n", name);
9209 }
9210 else
9211 {
9212 /* External and used, request code address. */
9213 fprintf (stream, "\t.code_address %s\n", name);
9214 }
9215 }
9216 else
9217 {
9218 if (link->lkind == KIND_LOCAL)
9219 {
9220 /* Local and used, build linkage pair. */
9221 fprintf (stream, "\t.quad %s..en\n", name);
9222 fprintf (stream, "\t.quad %s\n", name);
9223 }
9224 else
9225 {
9226 /* External and used, request linkage pair. */
9227 fprintf (stream, "\t.linkage %s\n", name);
9228 }
9229 }
9230
9231 return 0;
9232 }
9233
9234 static void
9235 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9236 {
9237 splay_tree_node node;
9238 struct alpha_funcs *func;
9239
9240 link_section ();
9241 fprintf (stream, "\t.align 3\n");
9242 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9243 func = (struct alpha_funcs *) node->value;
9244
9245 fputs ("\t.name ", stream);
9246 assemble_name (stream, funname);
9247 fputs ("..na\n", stream);
9248 ASM_OUTPUT_LABEL (stream, funname);
9249 fprintf (stream, "\t.pdesc ");
9250 assemble_name (stream, funname);
9251 fprintf (stream, "..en,%s\n",
9252 alpha_procedure_type == PT_STACK ? "stack"
9253 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9254
9255 if (func->links)
9256 {
9257 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9258 /* splay_tree_delete (func->links); */
9259 }
9260 }
9261
9262 /* Given a decl, a section name, and whether the decl initializer
9263 has relocs, choose attributes for the section. */
9264
9265 #define SECTION_VMS_OVERLAY SECTION_FORGET
9266 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9267 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9268
9269 static unsigned int
9270 vms_section_type_flags (tree decl, const char *name, int reloc)
9271 {
9272 unsigned int flags = default_section_type_flags (decl, name, reloc);
9273
9274 if (decl && DECL_ATTRIBUTES (decl)
9275 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9276 flags |= SECTION_VMS_OVERLAY;
9277 if (decl && DECL_ATTRIBUTES (decl)
9278 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9279 flags |= SECTION_VMS_GLOBAL;
9280 if (decl && DECL_ATTRIBUTES (decl)
9281 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9282 flags |= SECTION_VMS_INITIALIZE;
9283
9284 return flags;
9285 }
9286
9287 /* Switch to an arbitrary section NAME with attributes as specified
9288 by FLAGS. ALIGN specifies any known alignment requirements for
9289 the section; 0 if the default should be used. */
9290
9291 static void
9292 vms_asm_named_section (const char *name, unsigned int flags)
9293 {
9294 fputc ('\n', asm_out_file);
9295 fprintf (asm_out_file, ".section\t%s", name);
9296
9297 if (flags & SECTION_VMS_OVERLAY)
9298 fprintf (asm_out_file, ",OVR");
9299 if (flags & SECTION_VMS_GLOBAL)
9300 fprintf (asm_out_file, ",GBL");
9301 if (flags & SECTION_VMS_INITIALIZE)
9302 fprintf (asm_out_file, ",NOMOD");
9303 if (flags & SECTION_DEBUG)
9304 fprintf (asm_out_file, ",NOWRT");
9305
9306 fputc ('\n', asm_out_file);
9307 }
9308
9309 /* Record an element in the table of global constructors. SYMBOL is
9310 a SYMBOL_REF of the function to be called; PRIORITY is a number
9311 between 0 and MAX_INIT_PRIORITY.
9312
9313 Differs from default_ctors_section_asm_out_constructor in that the
9314 width of the .ctors entry is always 64 bits, rather than the 32 bits
9315 used by a normal pointer. */
9316
9317 static void
9318 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9319 {
9320 ctors_section ();
9321 assemble_align (BITS_PER_WORD);
9322 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9323 }
9324
9325 static void
9326 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9327 {
9328 dtors_section ();
9329 assemble_align (BITS_PER_WORD);
9330 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9331 }
9332 #else
9333
9334 rtx
9335 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9336 int is_local ATTRIBUTE_UNUSED)
9337 {
9338 return NULL_RTX;
9339 }
9340
9341 rtx
9342 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9343 tree cfundecl ATTRIBUTE_UNUSED,
9344 int lflag ATTRIBUTE_UNUSED,
9345 int rflag ATTRIBUTE_UNUSED)
9346 {
9347 return NULL_RTX;
9348 }
9349
9350 #endif /* TARGET_ABI_OPEN_VMS */
9351 \f
9352 #if TARGET_ABI_UNICOSMK
9353
9354 /* Define the offset between two registers, one to be eliminated, and the
9355 other its replacement, at the start of a routine. */
9356
9357 int
9358 unicosmk_initial_elimination_offset (int from, int to)
9359 {
9360 int fixed_size;
9361
9362 fixed_size = alpha_sa_size();
9363 if (fixed_size != 0)
9364 fixed_size += 48;
9365
9366 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9367 return -fixed_size;
9368 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9369 return 0;
9370 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9371 return (ALPHA_ROUND (current_function_outgoing_args_size)
9372 + ALPHA_ROUND (get_frame_size()));
9373 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9374 return (ALPHA_ROUND (fixed_size)
9375 + ALPHA_ROUND (get_frame_size()
9376 + current_function_outgoing_args_size));
9377 else
9378 abort ();
9379 }
9380
9381 /* Output the module name for .ident and .end directives. We have to strip
9382 directories and make sure that the module name starts with a letter
9383 or '$'. */
9384
9385 static void
9386 unicosmk_output_module_name (FILE *file)
9387 {
9388 const char *name = lbasename (main_input_filename);
9389 unsigned len = strlen (name);
9390 char *clean_name = alloca (len + 2);
9391 char *ptr = clean_name;
9392
9393 /* CAM only accepts module names that start with a letter or '$'. We
9394 prefix the module name with a '$' if necessary. */
9395
9396 if (!ISALPHA (*name))
9397 *ptr++ = '$';
9398 memcpy (ptr, name, len + 1);
9399 clean_symbol_name (clean_name);
9400 fputs (clean_name, file);
9401 }
9402
9403 /* Output the definition of a common variable. */
9404
9405 void
9406 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9407 {
9408 tree name_tree;
9409 printf ("T3E__: common %s\n", name);
9410
9411 common_section ();
9412 fputs("\t.endp\n\n\t.psect ", file);
9413 assemble_name(file, name);
9414 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9415 fprintf(file, "\t.byte\t0:%d\n", size);
9416
9417 /* Mark the symbol as defined in this module. */
9418 name_tree = get_identifier (name);
9419 TREE_ASM_WRITTEN (name_tree) = 1;
9420 }
9421
9422 #define SECTION_PUBLIC SECTION_MACH_DEP
9423 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9424 static int current_section_align;
9425
9426 static unsigned int
9427 unicosmk_section_type_flags (tree decl, const char *name,
9428 int reloc ATTRIBUTE_UNUSED)
9429 {
9430 unsigned int flags = default_section_type_flags (decl, name, reloc);
9431
9432 if (!decl)
9433 return flags;
9434
9435 if (TREE_CODE (decl) == FUNCTION_DECL)
9436 {
9437 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9438 if (align_functions_log > current_section_align)
9439 current_section_align = align_functions_log;
9440
9441 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9442 flags |= SECTION_MAIN;
9443 }
9444 else
9445 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9446
9447 if (TREE_PUBLIC (decl))
9448 flags |= SECTION_PUBLIC;
9449
9450 return flags;
9451 }
9452
9453 /* Generate a section name for decl and associate it with the
9454 declaration. */
9455
9456 static void
9457 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9458 {
9459 const char *name;
9460 int len;
9461
9462 if (!decl)
9463 abort ();
9464
9465 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9466 name = default_strip_name_encoding (name);
9467 len = strlen (name);
9468
9469 if (TREE_CODE (decl) == FUNCTION_DECL)
9470 {
9471 char *string;
9472
9473 /* It is essential that we prefix the section name here because
9474 otherwise the section names generated for constructors and
9475 destructors confuse collect2. */
9476
9477 string = alloca (len + 6);
9478 sprintf (string, "code@%s", name);
9479 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9480 }
9481 else if (TREE_PUBLIC (decl))
9482 DECL_SECTION_NAME (decl) = build_string (len, name);
9483 else
9484 {
9485 char *string;
9486
9487 string = alloca (len + 6);
9488 sprintf (string, "data@%s", name);
9489 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9490 }
9491 }
9492
9493 /* Switch to an arbitrary section NAME with attributes as specified
9494 by FLAGS. ALIGN specifies any known alignment requirements for
9495 the section; 0 if the default should be used. */
9496
9497 static void
9498 unicosmk_asm_named_section (const char *name, unsigned int flags)
9499 {
9500 const char *kind;
9501
9502 /* Close the previous section. */
9503
9504 fputs ("\t.endp\n\n", asm_out_file);
9505
9506 /* Find out what kind of section we are opening. */
9507
9508 if (flags & SECTION_MAIN)
9509 fputs ("\t.start\tmain\n", asm_out_file);
9510
9511 if (flags & SECTION_CODE)
9512 kind = "code";
9513 else if (flags & SECTION_PUBLIC)
9514 kind = "common";
9515 else
9516 kind = "data";
9517
9518 if (current_section_align != 0)
9519 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9520 current_section_align, kind);
9521 else
9522 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9523 }
9524
9525 static void
9526 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9527 {
9528 if (DECL_P (decl)
9529 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9530 unicosmk_unique_section (decl, 0);
9531 }
9532
9533 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9534 in code sections because .align fills unused space with zeroes. */
9535
9536 void
9537 unicosmk_output_align (FILE *file, int align)
9538 {
9539 if (inside_function)
9540 fprintf (file, "\tgcc@code@align\t%d\n", align);
9541 else
9542 fprintf (file, "\t.align\t%d\n", align);
9543 }
9544
9545 /* Add a case vector to the current function's list of deferred case
9546 vectors. Case vectors have to be put into a separate section because CAM
9547 does not allow data definitions in code sections. */
9548
9549 void
9550 unicosmk_defer_case_vector (rtx lab, rtx vec)
9551 {
9552 struct machine_function *machine = cfun->machine;
9553
9554 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9555 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9556 machine->addr_list);
9557 }
9558
9559 /* Output a case vector. */
9560
9561 static void
9562 unicosmk_output_addr_vec (FILE *file, rtx vec)
9563 {
9564 rtx lab = XEXP (vec, 0);
9565 rtx body = XEXP (vec, 1);
9566 int vlen = XVECLEN (body, 0);
9567 int idx;
9568
9569 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9570
9571 for (idx = 0; idx < vlen; idx++)
9572 {
9573 ASM_OUTPUT_ADDR_VEC_ELT
9574 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9575 }
9576 }
9577
9578 /* Output current function's deferred case vectors. */
9579
9580 static void
9581 unicosmk_output_deferred_case_vectors (FILE *file)
9582 {
9583 struct machine_function *machine = cfun->machine;
9584 rtx t;
9585
9586 if (machine->addr_list == NULL_RTX)
9587 return;
9588
9589 data_section ();
9590 for (t = machine->addr_list; t; t = XEXP (t, 1))
9591 unicosmk_output_addr_vec (file, XEXP (t, 0));
9592 }
9593
9594 /* Generate the name of the SSIB section for the current function. */
9595
9596 #define SSIB_PREFIX "__SSIB_"
9597 #define SSIB_PREFIX_LEN 7
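/* For example, a function whose assembler name is "main" gets the SSIB
   section name "__SSIB_main"; names too long for the 256-byte buffer are
   truncated to fit.  */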
9598
9599 static const char *
9600 unicosmk_ssib_name (void)
9601 {
9602 /* This is ok since CAM won't be able to deal with names longer than that
9603 anyway. */
9604
9605 static char name[256];
9606
9607 rtx x;
9608 const char *fnname;
9609 int len;
9610
9611 x = DECL_RTL (cfun->decl);
9612 if (GET_CODE (x) != MEM)
9613 abort ();
9614 x = XEXP (x, 0);
9615 if (GET_CODE (x) != SYMBOL_REF)
9616 abort ();
9617 fnname = XSTR (x, 0);
9618
9619 len = strlen (fnname);
9620 if (len + SSIB_PREFIX_LEN > 255)
9621 len = 255 - SSIB_PREFIX_LEN;
9622
9623 strcpy (name, SSIB_PREFIX);
9624 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
9625 name[len + SSIB_PREFIX_LEN] = 0;
9626
9627 return name;
9628 }
9629
9630 /* Set up the dynamic subprogram information block (DSIB) and update the
9631 frame pointer register ($15) for subroutines which have a frame. If the
9632 subroutine doesn't have a frame, simply increment $15. */
9633
9634 static void
9635 unicosmk_gen_dsib (unsigned long *imaskP)
9636 {
9637 if (alpha_procedure_type == PT_STACK)
9638 {
9639 const char *ssib_name;
9640 rtx mem;
9641
9642 /* Allocate 64 bytes for the DSIB. */
9643
9644 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9645 GEN_INT (-64))));
9646 emit_insn (gen_blockage ());
9647
9648 /* Save the return address. */
9649
9650 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
9651 set_mem_alias_set (mem, alpha_sr_alias_set);
9652 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
9653 (*imaskP) &= ~(1UL << REG_RA);
9654
9655 /* Save the old frame pointer. */
9656
9657 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
9658 set_mem_alias_set (mem, alpha_sr_alias_set);
9659 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
9660 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9661
9662 emit_insn (gen_blockage ());
9663
9664 /* Store the SSIB pointer. */
9665
9666 ssib_name = ggc_strdup (unicosmk_ssib_name ());
9667 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
9668 set_mem_alias_set (mem, alpha_sr_alias_set);
9669
9670 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
9671 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
9672 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
9673
9674 /* Save the CIW index. */
9675
9676 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
9677 set_mem_alias_set (mem, alpha_sr_alias_set);
9678 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
9679
9680 emit_insn (gen_blockage ());
9681
9682 /* Set the new frame pointer. */
9683
9684 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9685 stack_pointer_rtx, GEN_INT (64))));
9686
9687 }
9688 else
9689 {
9690 /* Increment the frame pointer register to indicate that we do not
9691 have a frame. */
9692
9693 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9694 hard_frame_pointer_rtx, const1_rtx)));
9695 }
9696 }
9697
9698 /* Output the static subroutine information block for the current
9699 function. */
9700
9701 static void
9702 unicosmk_output_ssib (FILE *file, const char *fnname)
9703 {
9704 int len;
9705 int i;
9706 rtx x;
9707 rtx ciw;
9708 struct machine_function *machine = cfun->machine;
9709
9710 ssib_section ();
9711 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
9712 unicosmk_ssib_name ());
9713
9714 /* Some required stuff and the function name length. */
9715
9716 len = strlen (fnname);
9717 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
9718
9719 /* Saved registers
9720 ??? We don't do that yet. */
9721
9722 fputs ("\t.quad\t0\n", file);
9723
9724 /* Function address. */
9725
9726 fputs ("\t.quad\t", file);
9727 assemble_name (file, fnname);
9728 putc ('\n', file);
9729
9730 fputs ("\t.quad\t0\n", file);
9731 fputs ("\t.quad\t0\n", file);
9732
9733 /* Function name.
9734 ??? We do it the same way Cray CC does it but this could be
9735 simplified. */
9736
9737 for( i = 0; i < len; i++ )
9738 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
9739 if( (len % 8) == 0 )
9740 fputs ("\t.quad\t0\n", file);
9741 else
9742 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
9743
9744 /* All call information words used in the function. */
9745
9746 for (x = machine->first_ciw; x; x = XEXP (x, 1))
9747 {
9748 ciw = XEXP (x, 0);
9749 #if HOST_BITS_PER_WIDE_INT == 32
9750 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
9751 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
9752 #else
9753 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
9754 #endif
9755 }
9756 }
9757
9758 /* Add a call information word (CIW) to the list of the current function's
9759 CIWs and return its index.
9760
9761 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
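/* The returned index is, in effect, the CIW's quadword offset within the
   SSIB emitted above: five fixed quadwords, then the function name padded
   to strlen (name) / 8 + 1 quadwords, then the CIWs in order (a reading of
   unicosmk_output_ssib, offered as a sketch rather than a guarantee).  */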
9762
9763 rtx
9764 unicosmk_add_call_info_word (rtx x)
9765 {
9766 rtx node;
9767 struct machine_function *machine = cfun->machine;
9768
9769 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
9770 if (machine->first_ciw == NULL_RTX)
9771 machine->first_ciw = node;
9772 else
9773 XEXP (machine->last_ciw, 1) = node;
9774
9775 machine->last_ciw = node;
9776 ++machine->ciw_count;
9777
9778 return GEN_INT (machine->ciw_count
9779 + strlen (current_function_name ()) / 8 + 5);
9780 }
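/* A sketch of the index arithmetic, matching the output order in
   unicosmk_output_ssib above: the SSIB starts with five fixed quadwords
   (header, saved-register word, function address and two zero words),
   followed by the padded function name, which occupies strlen (name) / 8 + 1
   quadwords.  The ciw_count-th CIW therefore ends up at quadword index
   ciw_count + strlen (name) / 8 + 5, counting from zero.  */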
9781
9782 static char unicosmk_section_buf[100];
9783
9784 char *
9785 unicosmk_text_section (void)
9786 {
9787 static int count = 0;
9788 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
9789 count++);
9790 return unicosmk_section_buf;
9791 }
9792
9793 char *
9794 unicosmk_data_section (void)
9795 {
9796 static int count = 1;
9797 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
9798 count++);
9799 return unicosmk_section_buf;
9800 }
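/* For example, the first call to unicosmk_text_section returns a string
   which, when emitted, closes the current psect with ".endp" and opens
   ".psect gcc@text___0,code"; the counters ensure that every text and data
   range ends up in its own psect.  */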
9801
9802 /* The Cray assembler doesn't accept extern declarations for symbols which
9803 are defined in the same file. We have to keep track of all global
9804 symbols which are referenced and/or defined in a source file and, at
9805 the end of the file, output extern declarations for those which are
9806 referenced but not defined. */
9807
9808 /* List of identifiers for which an extern declaration might have to be
9809 emitted. */
9810 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9811
9812 struct unicosmk_extern_list
9813 {
9814 struct unicosmk_extern_list *next;
9815 const char *name;
9816 };
9817
9818 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
9819
9820 /* Output extern declarations which are required for every asm file. */
9821
9822 static void
9823 unicosmk_output_default_externs (FILE *file)
9824 {
9825 static const char *const externs[] =
9826 { "__T3E_MISMATCH" };
9827
9828 int i;
9829 int n;
9830
9831 n = ARRAY_SIZE (externs);
9832
9833 for (i = 0; i < n; i++)
9834 fprintf (file, "\t.extern\t%s\n", externs[i]);
9835 }
9836
9837 /* Output extern declarations for global symbols which have been
9838 referenced but not defined. */
9839
9840 static void
9841 unicosmk_output_externs (FILE *file)
9842 {
9843 struct unicosmk_extern_list *p;
9844 const char *real_name;
9845 int len;
9846 tree name_tree;
9847
9848 len = strlen (user_label_prefix);
9849 for (p = unicosmk_extern_head; p != 0; p = p->next)
9850 {
9851 /* We have to strip the encoding and possibly remove user_label_prefix
9852 from the identifier in order to handle -fleading-underscore and
9853 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
9854 real_name = default_strip_name_encoding (p->name);
9855 if (len && p->name[0] == '*'
9856 && !memcmp (real_name, user_label_prefix, len))
9857 real_name += len;
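/* For example, with user_label_prefix "_" (-fleading-underscore), a
   reference recorded as "*_foo" is looked up below under the plain
   identifier "foo".  */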
9858
9859 name_tree = get_identifier (real_name);
9860 if (! TREE_ASM_WRITTEN (name_tree))
9861 {
9862 TREE_ASM_WRITTEN (name_tree) = 1;
9863 fputs ("\t.extern\t", file);
9864 assemble_name (file, p->name);
9865 putc ('\n', file);
9866 }
9867 }
9868 }
9869
9870 /* Record an extern. */
9871
9872 void
9873 unicosmk_add_extern (const char *name)
9874 {
9875 struct unicosmk_extern_list *p;
9876
9877 p = (struct unicosmk_extern_list *)
9878 xmalloc (sizeof (struct unicosmk_extern_list));
9879 p->next = unicosmk_extern_head;
9880 p->name = name;
9881 unicosmk_extern_head = p;
9882 }
9883
9884 /* The Cray assembler generates incorrect code if identifiers which
9885 conflict with register names are used as instruction operands. We have
9886 to replace such identifiers with DEX expressions. */
9887
9888 /* Structure to collect identifiers which have been replaced by DEX
9889 expressions. */
9890 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9891
9892 struct unicosmk_dex {
9893 struct unicosmk_dex *next;
9894 const char *name;
9895 };
9896
9897 /* List of identifiers which have been replaced by DEX expressions. The DEX
9898 number is determined by the position in the list. */
9899
9900 static struct unicosmk_dex *unicosmk_dex_list = NULL;
9901
9902 /* The number of elements in the DEX list. */
9903
9904 static int unicosmk_dex_count = 0;
9905
9906 /* Check if NAME must be replaced by a DEX expression. */
9907
9908 static int
9909 unicosmk_special_name (const char *name)
9910 {
9911 if (name[0] == '*')
9912 ++name;
9913
9914 if (name[0] == '$')
9915 ++name;
9916
9917 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
9918 return 0;
9919
9920 switch (name[1])
9921 {
9922 case '1': case '2':
9923 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
9924
9925 case '3':
9926 return (name[2] == '\0'
9927 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
9928
9929 default:
9930 return (ISDIGIT (name[1]) && name[2] == '\0');
9931 }
9932 }
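/* Examples of the matching above: "r31", "$f10" and "*R2" count as register
   names (after the optional '*' and '$' prefixes are skipped), whereas
   "r32" or "foo" do not.  */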
9933
9934 /* Return the DEX number if X must be replaced by a DEX expression and 0
9935 otherwise. */
9936
9937 static int
9938 unicosmk_need_dex (rtx x)
9939 {
9940 struct unicosmk_dex *dex;
9941 const char *name;
9942 int i;
9943
9944 if (GET_CODE (x) != SYMBOL_REF)
9945 return 0;
9946
9947 name = XSTR (x, 0);
9948 if (! unicosmk_special_name (name))
9949 return 0;
9950
9951 i = unicosmk_dex_count;
9952 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9953 {
9954 if (! strcmp (name, dex->name))
9955 return i;
9956 --i;
9957 }
9958
9959 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9960 dex->name = name;
9961 dex->next = unicosmk_dex_list;
9962 unicosmk_dex_list = dex;
9963
9964 ++unicosmk_dex_count;
9965 return unicosmk_dex_count;
9966 }
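/* Note on the numbering, matching unicosmk_output_dex below: the most
   recently added name gets the highest DEX number, and walking the list
   from the head counts downwards, which is how the loop above recovers the
   number of an entry that is already on the list.  */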
9967
9968 /* Output the DEX definitions for this file. */
9969
9970 static void
9971 unicosmk_output_dex (FILE *file)
9972 {
9973 struct unicosmk_dex *dex;
9974 int i;
9975
9976 if (unicosmk_dex_list == NULL)
9977 return;
9978
9979 fprintf (file, "\t.dexstart\n");
9980
9981 i = unicosmk_dex_count;
9982 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9983 {
9984 fprintf (file, "\tDEX (%d) = ", i);
9985 assemble_name (file, dex->name);
9986 putc ('\n', file);
9987 --i;
9988 }
9989
9990 fprintf (file, "\t.dexend\n");
9991 }
9992
9993 /* Output text to appear at the beginning of an assembler file. */
9994
9995 static void
9996 unicosmk_file_start (void)
9997 {
9998 int i;
9999
10000 fputs ("\t.ident\t", asm_out_file);
10001 unicosmk_output_module_name (asm_out_file);
10002 fputs ("\n\n", asm_out_file);
10003
10004 /* The Unicos/Mk assembler (CAM) uses different register names: rN for the
10005 integer register N and fN for the floating-point register N. Instead of
10006 trying to use these in alpha.md, we emit micro definitions of the symbols
10007 $N and $fN that refer to the appropriate register. */
10011
10012 for (i = 0; i < 32; ++i)
10013 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10014
10015 for (i = 0; i < 32; ++i)
10016 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
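/* The two loops above emit, for example:

	$0 <- r0
	...
	$31 <- r31
	$f0 <- f0
	...
	$f31 <- f31

   so the $-style names used in the compiler output resolve to CAM's
   register names.  */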
10017
10018 putc ('\n', asm_out_file);
10019
10020 /* The .align directive fills unused space with zeroes, which does not work
10021 in code sections. We define the macro 'gcc@code@align' which uses nops
10022 instead. Note that it assumes that code sections always have the
10023 biggest possible alignment, since . refers to the current offset from
10024 the beginning of the section. */
10025
10026 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10027 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10028 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10029 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10030 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10031 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10032 fputs ("\t.endr\n", asm_out_file);
10033 fputs ("\t.endif\n", asm_out_file);
10034 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
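/* A worked example of the macro just emitted: "gcc@code@align 4" requests
   16-byte alignment; if the current offset within the section is 12, the
   .repeat block emits (16 - 12) / 4 = 1 nop (bis r31,r31,r31), and if the
   offset is already 0 the .if guard skips the padding entirely.  */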
10035
10036 /* Output extern declarations which should always be visible. */
10037 unicosmk_output_default_externs (asm_out_file);
10038
10039 /* Open a dummy section. We always need to be inside a section for the
10040 section-switching code to work correctly.
10041 ??? This should be a module id or something like that. I still have to
10042 figure out what the rules for those are. */
10043 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10044 }
10045
10046 /* Output text to appear at the end of an assembler file. This includes all
10047 pending extern declarations and DEX expressions. */
10048
10049 static void
10050 unicosmk_file_end (void)
10051 {
10052 fputs ("\t.endp\n\n", asm_out_file);
10053
10054 /* Output all pending externs. */
10055
10056 unicosmk_output_externs (asm_out_file);
10057
10058 /* Output dex definitions used for functions whose names conflict with
10059 register names. */
10060
10061 unicosmk_output_dex (asm_out_file);
10062
10063 fputs ("\t.end\t", asm_out_file);
10064 unicosmk_output_module_name (asm_out_file);
10065 putc ('\n', asm_out_file);
10066 }
10067
10068 #else
10069
10070 static void
10071 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10072 {}
10073
10074 static void
10075 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10076 {}
10077
10078 static void
10079 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10080 const char * fnname ATTRIBUTE_UNUSED)
10081 {}
10082
10083 rtx
10084 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10085 {
10086 return NULL_RTX;
10087 }
10088
10089 static int
10090 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10091 {
10092 return 0;
10093 }
10094
10095 #endif /* TARGET_ABI_UNICOSMK */
10096
10097 static void
10098 alpha_init_libfuncs (void)
10099 {
10100 if (TARGET_ABI_UNICOSMK)
10101 {
10102 /* Prevent gcc from generating calls to __divsi3. */
10103 set_optab_libfunc (sdiv_optab, SImode, 0);
10104 set_optab_libfunc (udiv_optab, SImode, 0);
10105
10106 /* Use the functions provided by the system library
10107 for DImode integer division. */
10108 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10109 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10110 }
10111 else if (TARGET_ABI_OPEN_VMS)
10112 {
10113 /* Use the VMS runtime library functions for division and
10114 remainder. */
10115 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10116 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10117 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10118 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10119 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10120 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10121 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10122 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10123 }
10124 }
10125
10126 \f
10127 /* Initialize the GCC target structure. */
10128 #if TARGET_ABI_OPEN_VMS
10129 # undef TARGET_ATTRIBUTE_TABLE
10130 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10131 # undef TARGET_SECTION_TYPE_FLAGS
10132 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10133 #endif
10134
10135 #undef TARGET_IN_SMALL_DATA_P
10136 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10137
10138 #if TARGET_ABI_UNICOSMK
10139 # undef TARGET_INSERT_ATTRIBUTES
10140 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10141 # undef TARGET_SECTION_TYPE_FLAGS
10142 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10143 # undef TARGET_ASM_UNIQUE_SECTION
10144 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10145 # undef TARGET_ASM_GLOBALIZE_LABEL
10146 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10147 #endif
10148
10149 #undef TARGET_ASM_ALIGNED_HI_OP
10150 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10151 #undef TARGET_ASM_ALIGNED_DI_OP
10152 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10153
10154 /* Default unaligned ops are provided for ELF systems. To get unaligned
10155 data for non-ELF systems, we have to turn off auto alignment. */
10156 #ifndef OBJECT_FORMAT_ELF
10157 #undef TARGET_ASM_UNALIGNED_HI_OP
10158 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10159 #undef TARGET_ASM_UNALIGNED_SI_OP
10160 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10161 #undef TARGET_ASM_UNALIGNED_DI_OP
10162 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10163 #endif
10164
10165 #ifdef OBJECT_FORMAT_ELF
10166 #undef TARGET_ASM_SELECT_RTX_SECTION
10167 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10168 #endif
10169
10170 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10171 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10172
10173 #undef TARGET_INIT_LIBFUNCS
10174 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10175
10176 #if TARGET_ABI_UNICOSMK
10177 #undef TARGET_ASM_FILE_START
10178 #define TARGET_ASM_FILE_START unicosmk_file_start
10179 #undef TARGET_ASM_FILE_END
10180 #define TARGET_ASM_FILE_END unicosmk_file_end
10181 #else
10182 #undef TARGET_ASM_FILE_START
10183 #define TARGET_ASM_FILE_START alpha_file_start
10184 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10185 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10186 #endif
10187
10188 #undef TARGET_SCHED_ADJUST_COST
10189 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10190 #undef TARGET_SCHED_ISSUE_RATE
10191 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10192 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
10193 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
10194 alpha_use_dfa_pipeline_interface
10195 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10196 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10197 alpha_multipass_dfa_lookahead
10198
10199 #undef TARGET_HAVE_TLS
10200 #define TARGET_HAVE_TLS HAVE_AS_TLS
10201
10202 #undef TARGET_INIT_BUILTINS
10203 #define TARGET_INIT_BUILTINS alpha_init_builtins
10204 #undef TARGET_EXPAND_BUILTIN
10205 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10206
10207 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10208 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10209 #undef TARGET_CANNOT_COPY_INSN_P
10210 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10211
10212 #if TARGET_ABI_OSF
10213 #undef TARGET_ASM_OUTPUT_MI_THUNK
10214 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10215 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10216 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10217 #endif
10218
10219 #undef TARGET_RTX_COSTS
10220 #define TARGET_RTX_COSTS alpha_rtx_costs
10221 #undef TARGET_ADDRESS_COST
10222 #define TARGET_ADDRESS_COST hook_int_rtx_0
10223
10224 #undef TARGET_MACHINE_DEPENDENT_REORG
10225 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10226
10227 #undef TARGET_PROMOTE_FUNCTION_ARGS
10228 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10229 #undef TARGET_PROMOTE_FUNCTION_RETURN
10230 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10231 #undef TARGET_PROMOTE_PROTOTYPES
10232 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10233 #undef TARGET_RETURN_IN_MEMORY
10234 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10235 #undef TARGET_SETUP_INCOMING_VARARGS
10236 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10237 #undef TARGET_STRICT_ARGUMENT_NAMING
10238 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10239 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10240 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10241
10242 #undef TARGET_BUILD_BUILTIN_VA_LIST
10243 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10244
10245 struct gcc_target targetm = TARGET_INITIALIZER;
10246
10247 \f
10248 #include "gt-alpha.h"
10249