[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58
59 /* Specify which cpu to schedule for. */
60 enum processor_type alpha_tune;
61
62 /* Which cpu we're generating code for. */
63 enum processor_type alpha_cpu;
64
65 static const char * const alpha_cpu_name[] =
66 {
67 "ev4", "ev5", "ev6"
68 };
69
70 /* Specify how accurate floating-point traps need to be. */
71
72 enum alpha_trap_precision alpha_tp;
73
74 /* Specify the floating-point rounding mode. */
75
76 enum alpha_fp_rounding_mode alpha_fprm;
77
78 /* Specify which things cause traps. */
79
80 enum alpha_fp_trap_mode alpha_fptm;
81
82 /* Save information from a "cmpxx" operation until the branch or scc is
83 emitted. */
84
85 struct alpha_compare alpha_compare;
86
87 /* Nonzero if inside of a function, because the Alpha asm can't
88 handle .files inside of functions. */
89
90 static int inside_function = FALSE;
91
92 /* The number of cycles of latency we should assume on memory reads. */
93
94 int alpha_memory_latency = 3;
95
96 /* Whether the function needs the GP. */
97
98 static int alpha_function_needs_gp;
99
100 /* The alias set for prologue/epilogue register save/restore. */
101
102 static GTY(()) int alpha_sr_alias_set;
103
104 /* The assembler name of the current function. */
105
106 static const char *alpha_fnname;
107
108 /* The next explicit relocation sequence number. */
109 extern GTY(()) int alpha_next_sequence_number;
110 int alpha_next_sequence_number = 1;
111
112 /* The literal and gpdisp sequence numbers for this insn, as printed
113 by %# and %* respectively. */
114 extern GTY(()) int alpha_this_literal_sequence_number;
115 extern GTY(()) int alpha_this_gpdisp_sequence_number;
116 int alpha_this_literal_sequence_number;
117 int alpha_this_gpdisp_sequence_number;
118
119 /* Costs of various operations on the different architectures. */
120
121 struct alpha_rtx_cost_data
122 {
123 unsigned char fp_add;
124 unsigned char fp_mult;
125 unsigned char fp_div_sf;
126 unsigned char fp_div_df;
127 unsigned char int_mult_si;
128 unsigned char int_mult_di;
129 unsigned char int_shift;
130 unsigned char int_cmov;
131 unsigned short int_div;
132 };
133
134 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
135 {
136 { /* EV4 */
137 COSTS_N_INSNS (6), /* fp_add */
138 COSTS_N_INSNS (6), /* fp_mult */
139 COSTS_N_INSNS (34), /* fp_div_sf */
140 COSTS_N_INSNS (63), /* fp_div_df */
141 COSTS_N_INSNS (23), /* int_mult_si */
142 COSTS_N_INSNS (23), /* int_mult_di */
143 COSTS_N_INSNS (2), /* int_shift */
144 COSTS_N_INSNS (2), /* int_cmov */
145 COSTS_N_INSNS (97), /* int_div */
146 },
147 { /* EV5 */
148 COSTS_N_INSNS (4), /* fp_add */
149 COSTS_N_INSNS (4), /* fp_mult */
150 COSTS_N_INSNS (15), /* fp_div_sf */
151 COSTS_N_INSNS (22), /* fp_div_df */
152 COSTS_N_INSNS (8), /* int_mult_si */
153 COSTS_N_INSNS (12), /* int_mult_di */
154 COSTS_N_INSNS (1) + 1, /* int_shift */
155 COSTS_N_INSNS (1), /* int_cmov */
156 COSTS_N_INSNS (83), /* int_div */
157 },
158 { /* EV6 */
159 COSTS_N_INSNS (4), /* fp_add */
160 COSTS_N_INSNS (4), /* fp_mult */
161 COSTS_N_INSNS (12), /* fp_div_sf */
162 COSTS_N_INSNS (15), /* fp_div_df */
163 COSTS_N_INSNS (7), /* int_mult_si */
164 COSTS_N_INSNS (7), /* int_mult_di */
165 COSTS_N_INSNS (1), /* int_shift */
166 COSTS_N_INSNS (2), /* int_cmov */
167 COSTS_N_INSNS (86), /* int_div */
168 },
169 };
170
171 /* Similar but tuned for code size instead of execution latency. The
172 extra +N is fractional cost tuning based on latency. It's used to
173 encourage use of cheaper insns like shift, but only if there's just
174 one of them. */
175
176 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
177 {
178 COSTS_N_INSNS (1), /* fp_add */
179 COSTS_N_INSNS (1), /* fp_mult */
180 COSTS_N_INSNS (1), /* fp_div_sf */
181 COSTS_N_INSNS (1) + 1, /* fp_div_df */
182 COSTS_N_INSNS (1) + 1, /* int_mult_si */
183 COSTS_N_INSNS (1) + 2, /* int_mult_di */
184 COSTS_N_INSNS (1), /* int_shift */
185 COSTS_N_INSNS (1), /* int_cmov */
186 COSTS_N_INSNS (6), /* int_div */
187 };
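/* Editorial note (not part of the original source): COSTS_N_INSNS is
   defined in GCC's rtl.h as ((N) * 4), so costs are expressed in quarter
   insns and the "+ 1" / "+ 2" terms above bias a cost by a fraction of an
   insn.  A minimal sketch of the arithmetic, assuming that definition:

     #define COSTS_N_INSNS(N) ((N) * 4)

     COSTS_N_INSNS (1)       == 4    -- exactly one insn
     COSTS_N_INSNS (1) + 1   == 5    -- one insn plus a quarter-insn penalty
     COSTS_N_INSNS (1) + 2   == 6    -- one insn plus a half-insn penalty

   So when optimizing for size a DImode multiply (cost 6) is counted as
   slightly more expensive than a shift (cost 4), but far cheaper than its
   real latency in the per-CPU tables above.  */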
188
189 /* Get the number of args of a function in one of two ways. */
190 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
191 #define NUM_ARGS current_function_args_info.num_args
192 #else
193 #define NUM_ARGS current_function_args_info
194 #endif
195
196 #define REG_PV 27
197 #define REG_RA 26
198
199 /* Declarations of static functions. */
200 static struct machine_function *alpha_init_machine_status (void);
201 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
202
203 #if TARGET_ABI_OPEN_VMS
204 static void alpha_write_linkage (FILE *, const char *, tree);
205 #endif
206
207 static void unicosmk_output_deferred_case_vectors (FILE *);
208 static void unicosmk_gen_dsib (unsigned long *);
209 static void unicosmk_output_ssib (FILE *, const char *);
210 static int unicosmk_need_dex (rtx);
211 \f
212 /* Implement TARGET_HANDLE_OPTION. */
213
214 static bool
215 alpha_handle_option (size_t code, const char *arg, int value)
216 {
217 switch (code)
218 {
219 case OPT_mfp_regs:
220 if (value == 0)
221 target_flags |= MASK_SOFT_FP;
222 break;
223
224 case OPT_mieee:
225 case OPT_mieee_with_inexact:
226 target_flags |= MASK_IEEE_CONFORMANT;
227 break;
228
229 case OPT_mtls_size_:
230 if (value != 16 && value != 32 && value != 64)
231 error ("bad value %qs for -mtls-size switch", arg);
232 break;
233 }
234
235 return true;
236 }
237
238 /* Parse target option strings. */
239
240 void
241 override_options (void)
242 {
243 static const struct cpu_table {
244 const char *const name;
245 const enum processor_type processor;
246 const int flags;
247 } cpu_table[] = {
248 { "ev4", PROCESSOR_EV4, 0 },
249 { "ev45", PROCESSOR_EV4, 0 },
250 { "21064", PROCESSOR_EV4, 0 },
251 { "ev5", PROCESSOR_EV5, 0 },
252 { "21164", PROCESSOR_EV5, 0 },
253 { "ev56", PROCESSOR_EV5, MASK_BWX },
254 { "21164a", PROCESSOR_EV5, MASK_BWX },
255 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
256 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
257 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
258 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
259 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
260 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
261 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
262 { 0, 0, 0 }
263 };
264
265 int i;
266
267 /* Unicos/Mk doesn't have shared libraries. */
268 if (TARGET_ABI_UNICOSMK && flag_pic)
269 {
270 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
271 (flag_pic > 1) ? "PIC" : "pic");
272 flag_pic = 0;
273 }
274
275 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
276 floating-point instructions. Make that the default for this target. */
277 if (TARGET_ABI_UNICOSMK)
278 alpha_fprm = ALPHA_FPRM_DYN;
279 else
280 alpha_fprm = ALPHA_FPRM_NORM;
281
282 alpha_tp = ALPHA_TP_PROG;
283 alpha_fptm = ALPHA_FPTM_N;
284
285 /* We cannot use su and sui qualifiers for conversion instructions on
286 Unicos/Mk. I'm not sure if this is due to assembler or hardware
287 limitations. Right now, we issue a warning if -mieee is specified
288 and then ignore it; eventually, we should either get it right or
289 disable the option altogether. */
290
291 if (TARGET_IEEE)
292 {
293 if (TARGET_ABI_UNICOSMK)
294 warning (0, "-mieee not supported on Unicos/Mk");
295 else
296 {
297 alpha_tp = ALPHA_TP_INSN;
298 alpha_fptm = ALPHA_FPTM_SU;
299 }
300 }
301
302 if (TARGET_IEEE_WITH_INEXACT)
303 {
304 if (TARGET_ABI_UNICOSMK)
305 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
306 else
307 {
308 alpha_tp = ALPHA_TP_INSN;
309 alpha_fptm = ALPHA_FPTM_SUI;
310 }
311 }
312
313 if (alpha_tp_string)
314 {
315 if (! strcmp (alpha_tp_string, "p"))
316 alpha_tp = ALPHA_TP_PROG;
317 else if (! strcmp (alpha_tp_string, "f"))
318 alpha_tp = ALPHA_TP_FUNC;
319 else if (! strcmp (alpha_tp_string, "i"))
320 alpha_tp = ALPHA_TP_INSN;
321 else
322 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
323 }
324
325 if (alpha_fprm_string)
326 {
327 if (! strcmp (alpha_fprm_string, "n"))
328 alpha_fprm = ALPHA_FPRM_NORM;
329 else if (! strcmp (alpha_fprm_string, "m"))
330 alpha_fprm = ALPHA_FPRM_MINF;
331 else if (! strcmp (alpha_fprm_string, "c"))
332 alpha_fprm = ALPHA_FPRM_CHOP;
333 else if (! strcmp (alpha_fprm_string,"d"))
334 alpha_fprm = ALPHA_FPRM_DYN;
335 else
336 error ("bad value %qs for -mfp-rounding-mode switch",
337 alpha_fprm_string);
338 }
339
340 if (alpha_fptm_string)
341 {
342 if (strcmp (alpha_fptm_string, "n") == 0)
343 alpha_fptm = ALPHA_FPTM_N;
344 else if (strcmp (alpha_fptm_string, "u") == 0)
345 alpha_fptm = ALPHA_FPTM_U;
346 else if (strcmp (alpha_fptm_string, "su") == 0)
347 alpha_fptm = ALPHA_FPTM_SU;
348 else if (strcmp (alpha_fptm_string, "sui") == 0)
349 alpha_fptm = ALPHA_FPTM_SUI;
350 else
351 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
352 }
353
354 if (alpha_cpu_string)
355 {
356 for (i = 0; cpu_table [i].name; i++)
357 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
358 {
359 alpha_tune = alpha_cpu = cpu_table [i].processor;
360 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
361 target_flags |= cpu_table [i].flags;
362 break;
363 }
364 if (! cpu_table [i].name)
365 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
366 }
367
368 if (alpha_tune_string)
369 {
370 for (i = 0; cpu_table [i].name; i++)
371 if (! strcmp (alpha_tune_string, cpu_table [i].name))
372 {
373 alpha_tune = cpu_table [i].processor;
374 break;
375 }
376 if (! cpu_table [i].name)
377 error ("bad value %qs for -mcpu switch", alpha_tune_string);
378 }
379
380 /* Do some sanity checks on the above options. */
381
382 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
383 {
384 warning (0, "trap mode not supported on Unicos/Mk");
385 alpha_fptm = ALPHA_FPTM_N;
386 }
387
388 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
389 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
390 {
391 warning (0, "fp software completion requires -mtrap-precision=i");
392 alpha_tp = ALPHA_TP_INSN;
393 }
394
395 if (alpha_cpu == PROCESSOR_EV6)
396 {
397 /* Except for EV6 pass 1 (not released), we always have precise
398 arithmetic traps. Which means we can do software completion
399 without minding trap shadows. */
400 alpha_tp = ALPHA_TP_PROG;
401 }
402
403 if (TARGET_FLOAT_VAX)
404 {
405 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
406 {
407 warning (0, "rounding mode not supported for VAX floats");
408 alpha_fprm = ALPHA_FPRM_NORM;
409 }
410 if (alpha_fptm == ALPHA_FPTM_SUI)
411 {
412 warning (0, "trap mode not supported for VAX floats");
413 alpha_fptm = ALPHA_FPTM_SU;
414 }
415 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
416 warning (0, "128-bit long double not supported for VAX floats");
417 target_flags &= ~MASK_LONG_DOUBLE_128;
418 }
419
420 {
421 char *end;
422 int lat;
423
424 if (!alpha_mlat_string)
425 alpha_mlat_string = "L1";
426
427 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
428 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
429 ;
430 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
431 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
432 && alpha_mlat_string[2] == '\0')
433 {
434 static int const cache_latency[][4] =
435 {
436 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
437 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
438 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
439 };
440
441 lat = alpha_mlat_string[1] - '0';
442 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
443 {
444 warning (0, "L%d cache latency unknown for %s",
445 lat, alpha_cpu_name[alpha_tune]);
446 lat = 3;
447 }
448 else
449 lat = cache_latency[alpha_tune][lat-1];
450 }
451 else if (! strcmp (alpha_mlat_string, "main"))
452 {
453 /* Most current memories have about 370ns latency. This is
454 a reasonable guess for a fast cpu. */
455 lat = 150;
456 }
457 else
458 {
459 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
460 lat = 3;
461 }
462
463 alpha_memory_latency = lat;
464 }
465
466 /* Default the definition of "small data" to 8 bytes. */
467 if (!g_switch_set)
468 g_switch_value = 8;
469
470 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
471 if (flag_pic == 1)
472 target_flags |= MASK_SMALL_DATA;
473 else if (flag_pic == 2)
474 target_flags &= ~MASK_SMALL_DATA;
475
476 /* Align labels and loops for optimal branching. */
477 /* ??? Kludge these by not doing anything if we don't optimize and also if
478 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
479 if (optimize > 0 && write_symbols != SDB_DEBUG)
480 {
481 if (align_loops <= 0)
482 align_loops = 16;
483 if (align_jumps <= 0)
484 align_jumps = 16;
485 }
486 if (align_functions <= 0)
487 align_functions = 16;
488
489 /* Acquire a unique set number for our register saves and restores. */
490 alpha_sr_alias_set = new_alias_set ();
491
492 /* Register variables and functions with the garbage collector. */
493
494 /* Set up function hooks. */
495 init_machine_status = alpha_init_machine_status;
496
497 /* Tell the compiler when we're using VAX floating point. */
498 if (TARGET_FLOAT_VAX)
499 {
500 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
501 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
502 REAL_MODE_FORMAT (TFmode) = NULL;
503 }
504 }
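/* Editorial illustration (not from the original source): a few concrete
   option combinations and the settings the code above derives from them,
   assuming an OSF/ELF target where -mieee is honoured:

     -mcpu=ev56 -mieee      alpha_cpu = alpha_tune = PROCESSOR_EV5 with
                            MASK_BWX added; alpha_tp = ALPHA_TP_INSN and
                            alpha_fptm = ALPHA_FPTM_SU (on an EV6 the
                            trap precision would then be relaxed back to
                            ALPHA_TP_PROG, since its traps are precise)
     -mmemory-latency=L2    alpha_memory_latency = 12 when tuning for ev6
     (no -mmemory-latency)  treated as "L1", i.e. 3 cycles on ev6  */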
505 \f
506 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
507
508 int
509 zap_mask (HOST_WIDE_INT value)
510 {
511 int i;
512
513 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
514 i++, value >>= 8)
515 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
516 return 0;
517
518 return 1;
519 }
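/* Editorial illustration (not from the original source): zap_mask accepts
   exactly the AND masks that a single ZAP/ZAPNOT instruction can realize,
   i.e. constants in which every byte is either 0x00 or 0xff.  Sample
   calls, assuming a 64-bit HOST_WIDE_INT:

     zap_mask (0x00000000ffffffff)   returns 1   (keep the low four bytes)
     zap_mask (0xff00ff00ff00ff00)   returns 1   (keep alternating bytes)
     zap_mask (0x00000000ffff00f0)   returns 0   (0xf0 is a partial byte)  */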
520
521 /* Return true if OP is valid for a particular TLS relocation.
522 We are already guaranteed that OP is a CONST. */
523
524 int
525 tls_symbolic_operand_1 (rtx op, int size, int unspec)
526 {
527 op = XEXP (op, 0);
528
529 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
530 return 0;
531 op = XVECEXP (op, 0, 0);
532
533 if (GET_CODE (op) != SYMBOL_REF)
534 return 0;
535
536 if (SYMBOL_REF_LOCAL_P (op))
537 {
538 if (alpha_tls_size > size)
539 return 0;
540 }
541 else
542 {
543 if (size != 64)
544 return 0;
545 }
546
547 switch (SYMBOL_REF_TLS_MODEL (op))
548 {
549 case TLS_MODEL_LOCAL_DYNAMIC:
550 return unspec == UNSPEC_DTPREL;
551 case TLS_MODEL_INITIAL_EXEC:
552 return unspec == UNSPEC_TPREL && size == 64;
553 case TLS_MODEL_LOCAL_EXEC:
554 return unspec == UNSPEC_TPREL;
555 default:
556 gcc_unreachable ();
557 }
558 }
559
560 /* Used by aligned_memory_operand and unaligned_memory_operand to
561 resolve what reload is going to do with OP if it's a register. */
562
563 rtx
564 resolve_reload_operand (rtx op)
565 {
566 if (reload_in_progress)
567 {
568 rtx tmp = op;
569 if (GET_CODE (tmp) == SUBREG)
570 tmp = SUBREG_REG (tmp);
571 if (GET_CODE (tmp) == REG
572 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
573 {
574 op = reg_equiv_memory_loc[REGNO (tmp)];
575 if (op == 0)
576 return 0;
577 }
578 }
579 return op;
580 }
581
582 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
583 the range defined for C in [I-P]. */
584
585 bool
586 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
587 {
588 switch (c)
589 {
590 case 'I':
591 /* An unsigned 8 bit constant. */
592 return (unsigned HOST_WIDE_INT) value < 0x100;
593 case 'J':
594 /* The constant zero. */
595 return value == 0;
596 case 'K':
597 /* A signed 16 bit constant. */
598 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
599 case 'L':
600 /* A shifted signed 16 bit constant appropriate for LDAH. */
601 return ((value & 0xffff) == 0
602 && ((value) >> 31 == -1 || value >> 31 == 0));
603 case 'M':
604 /* A constant that can be used as the AND mask of a ZAP insn. */
605 return zap_mask (value);
606 case 'N':
607 /* A complemented unsigned 8 bit constant. */
608 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
609 case 'O':
610 /* A negated unsigned 8 bit constant. */
611 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
612 case 'P':
613 /* The constant 1, 2 or 3. */
614 return value == 1 || value == 2 || value == 3;
615
616 default:
617 return false;
618 }
619 }
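/* Editorial illustration (not from the original source): some constants
   and the constraint letters above that they satisfy:

     255          'I'  (unsigned 8-bit)
     -32768       'K'  (signed 16-bit, suitable for LDA)
     0x12340000   'L'  ((value & 0xffff) == 0 and it sign-extends from
                        bit 31, so a single LDAH materializes it)
     0x00ff00ff   'M'  (a zap_mask value)
     3            'P'  (and also 'I' and 'K')  */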
620
621 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
622 matches for C in [GH]. */
623
624 bool
625 alpha_const_double_ok_for_letter_p (rtx value, int c)
626 {
627 switch (c)
628 {
629 case 'G':
630 /* The floating point zero constant. */
631 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
632 && value == CONST0_RTX (GET_MODE (value)));
633
634 case 'H':
635 /* A valid operand of a ZAP insn. */
636 return (GET_MODE (value) == VOIDmode
637 && zap_mask (CONST_DOUBLE_LOW (value))
638 && zap_mask (CONST_DOUBLE_HIGH (value)));
639
640 default:
641 return false;
642 }
643 }
644
645 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
646 matches for C. */
647
648 bool
649 alpha_extra_constraint (rtx value, int c)
650 {
651 switch (c)
652 {
653 case 'Q':
654 return normal_memory_operand (value, VOIDmode);
655 case 'R':
656 return direct_call_operand (value, Pmode);
657 case 'S':
658 return (GET_CODE (value) == CONST_INT
659 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
660 case 'T':
661 return GET_CODE (value) == HIGH;
662 case 'U':
663 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
664 case 'W':
665 return (GET_CODE (value) == CONST_VECTOR
666 && value == CONST0_RTX (GET_MODE (value)));
667 default:
668 return false;
669 }
670 }
671
672 /* The scalar modes supported differ from the default check-what-c-supports
673 version in that sometimes TFmode is available even when long double
674 indicates only DFmode. On unicosmk, we have the situation that HImode
675 doesn't map to any C type, but of course we still support that. */
676
677 static bool
678 alpha_scalar_mode_supported_p (enum machine_mode mode)
679 {
680 switch (mode)
681 {
682 case QImode:
683 case HImode:
684 case SImode:
685 case DImode:
686 case TImode: /* via optabs.c */
687 return true;
688
689 case SFmode:
690 case DFmode:
691 return true;
692
693 case TFmode:
694 return TARGET_HAS_XFLOATING_LIBS;
695
696 default:
697 return false;
698 }
699 }
700
701 /* Alpha implements a couple of integer vector mode operations when
702 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
703 which allows the vectorizer to operate on e.g. move instructions,
704 or when expand_vector_operations can do something useful. */
705
706 static bool
707 alpha_vector_mode_supported_p (enum machine_mode mode)
708 {
709 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
710 }
711
712 /* Return 1 if this function can directly return via $26. */
713
714 int
715 direct_return (void)
716 {
717 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
718 && reload_completed
719 && alpha_sa_size () == 0
720 && get_frame_size () == 0
721 && current_function_outgoing_args_size == 0
722 && current_function_pretend_args_size == 0);
723 }
724
725 /* Return the ADDR_VEC associated with a tablejump insn. */
726
727 rtx
728 alpha_tablejump_addr_vec (rtx insn)
729 {
730 rtx tmp;
731
732 tmp = JUMP_LABEL (insn);
733 if (!tmp)
734 return NULL_RTX;
735 tmp = NEXT_INSN (tmp);
736 if (!tmp)
737 return NULL_RTX;
738 if (GET_CODE (tmp) == JUMP_INSN
739 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
740 return PATTERN (tmp);
741 return NULL_RTX;
742 }
743
744 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
745
746 rtx
747 alpha_tablejump_best_label (rtx insn)
748 {
749 rtx jump_table = alpha_tablejump_addr_vec (insn);
750 rtx best_label = NULL_RTX;
751
752 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
753 there for edge frequency counts from profile data. */
754
755 if (jump_table)
756 {
757 int n_labels = XVECLEN (jump_table, 1);
758 int best_count = -1;
759 int i, j;
760
761 for (i = 0; i < n_labels; i++)
762 {
763 int count = 1;
764
765 for (j = i + 1; j < n_labels; j++)
766 if (XEXP (XVECEXP (jump_table, 1, i), 0)
767 == XEXP (XVECEXP (jump_table, 1, j), 0))
768 count++;
769
770 if (count > best_count)
771 best_count = count, best_label = XVECEXP (jump_table, 1, i);
772 }
773 }
774
775 return best_label ? best_label : const0_rtx;
776 }
777
778 /* Return the TLS model to use for SYMBOL. */
779
780 static enum tls_model
781 tls_symbolic_operand_type (rtx symbol)
782 {
783 enum tls_model model;
784
785 if (GET_CODE (symbol) != SYMBOL_REF)
786 return 0;
787 model = SYMBOL_REF_TLS_MODEL (symbol);
788
789 /* Local-exec with a 64-bit size is the same code as initial-exec. */
790 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
791 model = TLS_MODEL_INITIAL_EXEC;
792
793 return model;
794 }
795 \f
796 /* Return true if the function DECL will share the same GP as any
797 function in the current unit of translation. */
798
799 static bool
800 decl_has_samegp (tree decl)
801 {
802 /* Functions that are not local can be overridden, and thus may
803 not share the same gp. */
804 if (!(*targetm.binds_local_p) (decl))
805 return false;
806
807 /* If -msmall-data is in effect, assume that there is only one GP
808 for the module, and so any local symbol has this property. We
809 need explicit relocations to be able to enforce this for symbols
810 not defined in this unit of translation, however. */
811 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
812 return true;
813
814 /* Functions that are not external are defined in this UoT. */
815 /* ??? Irritatingly, static functions not yet emitted are still
816 marked "external". Apply this to non-static functions only. */
817 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
818 }
819
820 /* Return true if EXP should be placed in the small data section. */
821
822 static bool
823 alpha_in_small_data_p (tree exp)
824 {
825 /* We want to merge strings, so we never consider them small data. */
826 if (TREE_CODE (exp) == STRING_CST)
827 return false;
828
829 /* Functions are never in the small data area. Duh. */
830 if (TREE_CODE (exp) == FUNCTION_DECL)
831 return false;
832
833 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
834 {
835 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
836 if (strcmp (section, ".sdata") == 0
837 || strcmp (section, ".sbss") == 0)
838 return true;
839 }
840 else
841 {
842 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
843
844 /* If this is an incomplete type with size 0, then we can't put it
845 in sdata because it might be too big when completed. */
846 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
847 return true;
848 }
849
850 return false;
851 }
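/* Editorial illustration (not from the original source): with the default
   -G 8 threshold, the predicate above classifies objects like this:

     int counter;                              small data (4 bytes <= 8)
     double xyz[4];                            not small data (32 bytes > 8)
     char big[100]
       __attribute__((section(".sdata")));     small data (explicit .sdata)

   String literals (STRING_CST) and functions are always excluded, the
   former so that string merging remains possible.  */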
852
853 #if TARGET_ABI_OPEN_VMS
854 static bool
855 alpha_linkage_symbol_p (const char *symname)
856 {
857 int symlen = strlen (symname);
858
859 if (symlen > 4)
860 return strcmp (&symname [symlen - 4], "..lk") == 0;
861
862 return false;
863 }
864
865 #define LINKAGE_SYMBOL_REF_P(X) \
866 ((GET_CODE (X) == SYMBOL_REF \
867 && alpha_linkage_symbol_p (XSTR (X, 0))) \
868 || (GET_CODE (X) == CONST \
869 && GET_CODE (XEXP (X, 0)) == PLUS \
870 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
871 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
872 #endif
873
874 /* legitimate_address_p recognizes an RTL expression that is a valid
875 memory address for an instruction. The MODE argument is the
876 machine mode for the MEM expression that wants to use this address.
877
878 For Alpha, we have either a constant address or the sum of a
879 register and a constant address, or just a register. For DImode,
880 any of those forms can be surrounded with an AND that clear the
881 low-order three bits; this is an "unaligned" access. */
882
883 bool
884 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
885 {
886 /* If this is an ldq_u type address, discard the outer AND. */
887 if (mode == DImode
888 && GET_CODE (x) == AND
889 && GET_CODE (XEXP (x, 1)) == CONST_INT
890 && INTVAL (XEXP (x, 1)) == -8)
891 x = XEXP (x, 0);
892
893 /* Discard non-paradoxical subregs. */
894 if (GET_CODE (x) == SUBREG
895 && (GET_MODE_SIZE (GET_MODE (x))
896 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
897 x = SUBREG_REG (x);
898
899 /* Unadorned general registers are valid. */
900 if (REG_P (x)
901 && (strict
902 ? STRICT_REG_OK_FOR_BASE_P (x)
903 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
904 return true;
905
906 /* Constant addresses (i.e. +/- 32k) are valid. */
907 if (CONSTANT_ADDRESS_P (x))
908 return true;
909
910 #if TARGET_ABI_OPEN_VMS
911 if (LINKAGE_SYMBOL_REF_P (x))
912 return true;
913 #endif
914
915 /* Register plus a small constant offset is valid. */
916 if (GET_CODE (x) == PLUS)
917 {
918 rtx ofs = XEXP (x, 1);
919 x = XEXP (x, 0);
920
921 /* Discard non-paradoxical subregs. */
922 if (GET_CODE (x) == SUBREG
923 && (GET_MODE_SIZE (GET_MODE (x))
924 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
925 x = SUBREG_REG (x);
926
927 if (REG_P (x))
928 {
929 if (! strict
930 && NONSTRICT_REG_OK_FP_BASE_P (x)
931 && GET_CODE (ofs) == CONST_INT)
932 return true;
933 if ((strict
934 ? STRICT_REG_OK_FOR_BASE_P (x)
935 : NONSTRICT_REG_OK_FOR_BASE_P (x))
936 && CONSTANT_ADDRESS_P (ofs))
937 return true;
938 }
939 }
940
941 /* If we're managing explicit relocations, LO_SUM is valid, as
942 are small data symbols. */
943 else if (TARGET_EXPLICIT_RELOCS)
944 {
945 if (small_symbolic_operand (x, Pmode))
946 return true;
947
948 if (GET_CODE (x) == LO_SUM)
949 {
950 rtx ofs = XEXP (x, 1);
951 x = XEXP (x, 0);
952
953 /* Discard non-paradoxical subregs. */
954 if (GET_CODE (x) == SUBREG
955 && (GET_MODE_SIZE (GET_MODE (x))
956 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
957 x = SUBREG_REG (x);
958
959 /* Must have a valid base register. */
960 if (! (REG_P (x)
961 && (strict
962 ? STRICT_REG_OK_FOR_BASE_P (x)
963 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
964 return false;
965
966 /* The symbol must be local. */
967 if (local_symbolic_operand (ofs, Pmode)
968 || dtp32_symbolic_operand (ofs, Pmode)
969 || tp32_symbolic_operand (ofs, Pmode))
970 return true;
971 }
972 }
973
974 return false;
975 }
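/* Editorial illustration (not from the original source): address forms the
   predicate above accepts for a DImode access, written as RTL:

     (reg:DI 16)                                    plain base register
     (plus:DI (reg:DI 16) (const_int -32768))       base + signed 16-bit offset
     (and:DI (plus:DI (reg:DI 16) (const_int 5))
             (const_int -8))                        ldq_u-style unaligned access
     (lo_sum:DI (reg:DI 29) (symbol_ref ("x")))     with -mexplicit-relocs and
                                                    a local symbol

   whereas (plus:DI (reg:DI 16) (const_int 65536)) fails the signed 16-bit
   displacement test and must normally be legitimized by splitting the
   displacement.  */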
976
977 /* Build the SYMBOL_REF for __tls_get_addr. */
978
979 static GTY(()) rtx tls_get_addr_libfunc;
980
981 static rtx
982 get_tls_get_addr (void)
983 {
984 if (!tls_get_addr_libfunc)
985 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
986 return tls_get_addr_libfunc;
987 }
988
989 /* Try machine-dependent ways of modifying an illegitimate address
990 to be legitimate. If we find one, return the new, valid address. */
991
992 rtx
993 alpha_legitimize_address (rtx x, rtx scratch,
994 enum machine_mode mode ATTRIBUTE_UNUSED)
995 {
996 HOST_WIDE_INT addend;
997
998 /* If the address is (plus reg const_int) and the CONST_INT is not a
999 valid offset, compute the high part of the constant and add it to
1000 the register. Then our address is (plus temp low-part-const). */
1001 if (GET_CODE (x) == PLUS
1002 && GET_CODE (XEXP (x, 0)) == REG
1003 && GET_CODE (XEXP (x, 1)) == CONST_INT
1004 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1005 {
1006 addend = INTVAL (XEXP (x, 1));
1007 x = XEXP (x, 0);
1008 goto split_addend;
1009 }
1010
1011 /* If the address is (const (plus FOO const_int)), find the low-order
1012 part of the CONST_INT. Then load FOO plus any high-order part of the
1013 CONST_INT into a register. Our address is (plus reg low-part-const).
1014 This is done to reduce the number of GOT entries. */
1015 if (!no_new_pseudos
1016 && GET_CODE (x) == CONST
1017 && GET_CODE (XEXP (x, 0)) == PLUS
1018 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1019 {
1020 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1021 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1022 goto split_addend;
1023 }
1024
1025 /* If we have a (plus reg const), emit the load as in (2), then add
1026 the two registers, and finally generate (plus reg low-part-const) as
1027 our address. */
1028 if (!no_new_pseudos
1029 && GET_CODE (x) == PLUS
1030 && GET_CODE (XEXP (x, 0)) == REG
1031 && GET_CODE (XEXP (x, 1)) == CONST
1032 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1033 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1034 {
1035 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1036 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1037 XEXP (XEXP (XEXP (x, 1), 0), 0),
1038 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1039 goto split_addend;
1040 }
1041
1042 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1043 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1044 {
1045 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1046
1047 switch (tls_symbolic_operand_type (x))
1048 {
1049 case TLS_MODEL_NONE:
1050 break;
1051
1052 case TLS_MODEL_GLOBAL_DYNAMIC:
1053 start_sequence ();
1054
1055 r0 = gen_rtx_REG (Pmode, 0);
1056 r16 = gen_rtx_REG (Pmode, 16);
1057 tga = get_tls_get_addr ();
1058 dest = gen_reg_rtx (Pmode);
1059 seq = GEN_INT (alpha_next_sequence_number++);
1060
1061 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1062 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1063 insn = emit_call_insn (insn);
1064 CONST_OR_PURE_CALL_P (insn) = 1;
1065 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1066
1067 insn = get_insns ();
1068 end_sequence ();
1069
1070 emit_libcall_block (insn, dest, r0, x);
1071 return dest;
1072
1073 case TLS_MODEL_LOCAL_DYNAMIC:
1074 start_sequence ();
1075
1076 r0 = gen_rtx_REG (Pmode, 0);
1077 r16 = gen_rtx_REG (Pmode, 16);
1078 tga = get_tls_get_addr ();
1079 scratch = gen_reg_rtx (Pmode);
1080 seq = GEN_INT (alpha_next_sequence_number++);
1081
1082 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1083 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1084 insn = emit_call_insn (insn);
1085 CONST_OR_PURE_CALL_P (insn) = 1;
1086 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1087
1088 insn = get_insns ();
1089 end_sequence ();
1090
1091 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1092 UNSPEC_TLSLDM_CALL);
1093 emit_libcall_block (insn, scratch, r0, eqv);
1094
1095 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1096 eqv = gen_rtx_CONST (Pmode, eqv);
1097
1098 if (alpha_tls_size == 64)
1099 {
1100 dest = gen_reg_rtx (Pmode);
1101 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1102 emit_insn (gen_adddi3 (dest, dest, scratch));
1103 return dest;
1104 }
1105 if (alpha_tls_size == 32)
1106 {
1107 insn = gen_rtx_HIGH (Pmode, eqv);
1108 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1109 scratch = gen_reg_rtx (Pmode);
1110 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1111 }
1112 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1113
1114 case TLS_MODEL_INITIAL_EXEC:
1115 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1116 eqv = gen_rtx_CONST (Pmode, eqv);
1117 tp = gen_reg_rtx (Pmode);
1118 scratch = gen_reg_rtx (Pmode);
1119 dest = gen_reg_rtx (Pmode);
1120
1121 emit_insn (gen_load_tp (tp));
1122 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1123 emit_insn (gen_adddi3 (dest, tp, scratch));
1124 return dest;
1125
1126 case TLS_MODEL_LOCAL_EXEC:
1127 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1128 eqv = gen_rtx_CONST (Pmode, eqv);
1129 tp = gen_reg_rtx (Pmode);
1130
1131 emit_insn (gen_load_tp (tp));
1132 if (alpha_tls_size == 32)
1133 {
1134 insn = gen_rtx_HIGH (Pmode, eqv);
1135 insn = gen_rtx_PLUS (Pmode, tp, insn);
1136 tp = gen_reg_rtx (Pmode);
1137 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1138 }
1139 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1140
1141 default:
1142 gcc_unreachable ();
1143 }
1144
1145 if (local_symbolic_operand (x, Pmode))
1146 {
1147 if (small_symbolic_operand (x, Pmode))
1148 return x;
1149 else
1150 {
1151 if (!no_new_pseudos)
1152 scratch = gen_reg_rtx (Pmode);
1153 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1154 gen_rtx_HIGH (Pmode, x)));
1155 return gen_rtx_LO_SUM (Pmode, scratch, x);
1156 }
1157 }
1158 }
1159
1160 return NULL;
1161
1162 split_addend:
1163 {
1164 HOST_WIDE_INT low, high;
1165
1166 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1167 addend -= low;
1168 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1169 addend -= high;
1170
1171 if (addend)
1172 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1173 (no_new_pseudos ? scratch : NULL_RTX),
1174 1, OPTAB_LIB_WIDEN);
1175 if (high)
1176 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1177 (no_new_pseudos ? scratch : NULL_RTX),
1178 1, OPTAB_LIB_WIDEN);
1179
1180 return plus_constant (x, low);
1181 }
1182 }
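/* Editorial illustration (not from the original source): the split_addend
   arithmetic above decomposes an out-of-range displacement into a part an
   LDAH can add (a signed 16-bit value shifted left 16) plus a part an LDA
   can add (a signed 16-bit value).  For (plus (reg) (const_int 98312)):

     low  = ((98312 & 0xffff) ^ 0x8000) - 0x8000  =  -32760
     high = 98312 - low                           =  131072  (2 << 16)

   so the address is rewritten as ((reg + 131072) + -32760), i.e. one
   "ldah tmp,2(reg)" followed by a memory access with displacement -32760.  */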
1183
1184 /* Primarily this is required for TLS symbols, but given that our move
1185 patterns *ought* to be able to handle any symbol at any time, we
1186 should never be spilling symbolic operands to the constant pool, ever. */
1187
1188 static bool
1189 alpha_cannot_force_const_mem (rtx x)
1190 {
1191 enum rtx_code code = GET_CODE (x);
1192 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1193 }
1194
1195 /* We do not allow indirect calls to be optimized into sibling calls, nor
1196 can we allow a call to a function with a different GP to be optimized
1197 into a sibcall. */
1198
1199 static bool
1200 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1201 {
1202 /* Can't do indirect tail calls, since we don't know if the target
1203 uses the same GP. */
1204 if (!decl)
1205 return false;
1206
1207 /* Otherwise, we can make a tail call if the target function shares
1208 the same GP. */
1209 return decl_has_samegp (decl);
1210 }
1211
1212 int
1213 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1214 {
1215 rtx x = *px;
1216
1217 /* Don't re-split. */
1218 if (GET_CODE (x) == LO_SUM)
1219 return -1;
1220
1221 return small_symbolic_operand (x, Pmode) != 0;
1222 }
1223
1224 static int
1225 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1226 {
1227 rtx x = *px;
1228
1229 /* Don't re-split. */
1230 if (GET_CODE (x) == LO_SUM)
1231 return -1;
1232
1233 if (small_symbolic_operand (x, Pmode))
1234 {
1235 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1236 *px = x;
1237 return -1;
1238 }
1239
1240 return 0;
1241 }
1242
1243 rtx
1244 split_small_symbolic_operand (rtx x)
1245 {
1246 x = copy_insn (x);
1247 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1248 return x;
1249 }
1250
1251 /* Indicate that INSN cannot be duplicated. This is true for any insn
1252 that we've marked with gpdisp relocs, since those have to stay in
1253 1-1 correspondence with one another.
1254
1255 Technically we could copy them if we could set up a mapping from one
1256 sequence number to another, across the set of insns to be duplicated.
1257 This seems overly complicated and error-prone since interblock motion
1258 from sched-ebb could move one of the pair of insns to a different block.
1259
1260 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1261 then they'll be in a different block from their ldgp. Which could lead
1262 the bb reorder code to think that it would be ok to copy just the block
1263 containing the call and branch to the block containing the ldgp. */
1264
1265 static bool
1266 alpha_cannot_copy_insn_p (rtx insn)
1267 {
1268 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1269 return false;
1270 if (recog_memoized (insn) >= 0)
1271 return get_attr_cannot_copy (insn);
1272 else
1273 return false;
1274 }
1275
1276
1277 /* Try a machine-dependent way of reloading an illegitimate address
1278 operand. If we find one, push the reload and return the new rtx. */
1279
1280 rtx
1281 alpha_legitimize_reload_address (rtx x,
1282 enum machine_mode mode ATTRIBUTE_UNUSED,
1283 int opnum, int type,
1284 int ind_levels ATTRIBUTE_UNUSED)
1285 {
1286 /* We must recognize output that we have already generated ourselves. */
1287 if (GET_CODE (x) == PLUS
1288 && GET_CODE (XEXP (x, 0)) == PLUS
1289 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1290 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1291 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1292 {
1293 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1294 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1295 opnum, type);
1296 return x;
1297 }
1298
1299 /* We wish to handle large displacements off a base register by
1300 splitting the addend across an ldah and the mem insn. This
1301 cuts the number of extra insns needed from 3 to 1. */
1302 if (GET_CODE (x) == PLUS
1303 && GET_CODE (XEXP (x, 0)) == REG
1304 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1305 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1306 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1307 {
1308 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1309 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1310 HOST_WIDE_INT high
1311 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1312
1313 /* Check for 32-bit overflow. */
1314 if (high + low != val)
1315 return NULL_RTX;
1316
1317 /* Reload the high part into a base reg; leave the low part
1318 in the mem directly. */
1319 x = gen_rtx_PLUS (GET_MODE (x),
1320 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1321 GEN_INT (high)),
1322 GEN_INT (low));
1323
1324 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1325 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1326 opnum, type);
1327 return x;
1328 }
1329
1330 return NULL_RTX;
1331 }
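/* Editorial illustration (not from the original source): the overflow test
   above rejects displacements that cannot be expressed as an (ldah, lda)
   pair.  For val = 0x12348 we get low = 0x2348 and high = 0x10000, and
   high + low == val, so the high part is reloaded into a base register
   while 0x2348 stays in the mem.  For val = 0x7fffffff we get low = -1
   and high = -0x80000000; high + low != val, so NULL_RTX is returned and
   reload falls back to its generic handling of the address.  */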
1332 \f
1333 /* Compute a (partial) cost for rtx X. Return true if the complete
1334 cost has been computed, and false if subexpressions should be
1335 scanned. In either case, *TOTAL contains the cost result. */
1336
1337 static bool
1338 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1339 {
1340 enum machine_mode mode = GET_MODE (x);
1341 bool float_mode_p = FLOAT_MODE_P (mode);
1342 const struct alpha_rtx_cost_data *cost_data;
1343
1344 if (optimize_size)
1345 cost_data = &alpha_rtx_cost_size;
1346 else
1347 cost_data = &alpha_rtx_cost_data[alpha_tune];
1348
1349 switch (code)
1350 {
1351 case CONST_INT:
1352 /* If this is an 8-bit constant, return zero since it can be used
1353 nearly anywhere with no cost. If it is a valid operand for an
1354 ADD or AND, likewise return 0 if we know it will be used in that
1355 context. Otherwise, return 2 since it might be used there later.
1356 All other constants take at least two insns. */
1357 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1358 {
1359 *total = 0;
1360 return true;
1361 }
1362 /* FALLTHRU */
1363
1364 case CONST_DOUBLE:
1365 if (x == CONST0_RTX (mode))
1366 *total = 0;
1367 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1368 || (outer_code == AND && and_operand (x, VOIDmode)))
1369 *total = 0;
1370 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1371 *total = 2;
1372 else
1373 *total = COSTS_N_INSNS (2);
1374 return true;
1375
1376 case CONST:
1377 case SYMBOL_REF:
1378 case LABEL_REF:
1379 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1380 *total = COSTS_N_INSNS (outer_code != MEM);
1381 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1382 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1383 else if (tls_symbolic_operand_type (x))
1384 /* Estimate of cost for call_pal rduniq. */
1385 /* ??? How many insns do we emit here? More than one... */
1386 *total = COSTS_N_INSNS (15);
1387 else
1388 /* Otherwise we do a load from the GOT. */
1389 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1390 return true;
1391
1392 case HIGH:
1393 /* This is effectively an add_operand. */
1394 *total = 2;
1395 return true;
1396
1397 case PLUS:
1398 case MINUS:
1399 if (float_mode_p)
1400 *total = cost_data->fp_add;
1401 else if (GET_CODE (XEXP (x, 0)) == MULT
1402 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1403 {
1404 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1405 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1406 return true;
1407 }
1408 return false;
1409
1410 case MULT:
1411 if (float_mode_p)
1412 *total = cost_data->fp_mult;
1413 else if (mode == DImode)
1414 *total = cost_data->int_mult_di;
1415 else
1416 *total = cost_data->int_mult_si;
1417 return false;
1418
1419 case ASHIFT:
1420 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1421 && INTVAL (XEXP (x, 1)) <= 3)
1422 {
1423 *total = COSTS_N_INSNS (1);
1424 return false;
1425 }
1426 /* FALLTHRU */
1427
1428 case ASHIFTRT:
1429 case LSHIFTRT:
1430 *total = cost_data->int_shift;
1431 return false;
1432
1433 case IF_THEN_ELSE:
1434 if (float_mode_p)
1435 *total = cost_data->fp_add;
1436 else
1437 *total = cost_data->int_cmov;
1438 return false;
1439
1440 case DIV:
1441 case UDIV:
1442 case MOD:
1443 case UMOD:
1444 if (!float_mode_p)
1445 *total = cost_data->int_div;
1446 else if (mode == SFmode)
1447 *total = cost_data->fp_div_sf;
1448 else
1449 *total = cost_data->fp_div_df;
1450 return false;
1451
1452 case MEM:
1453 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1454 return true;
1455
1456 case NEG:
1457 if (! float_mode_p)
1458 {
1459 *total = COSTS_N_INSNS (1);
1460 return false;
1461 }
1462 /* FALLTHRU */
1463
1464 case ABS:
1465 if (! float_mode_p)
1466 {
1467 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1468 return false;
1469 }
1470 /* FALLTHRU */
1471
1472 case FLOAT:
1473 case UNSIGNED_FLOAT:
1474 case FIX:
1475 case UNSIGNED_FIX:
1476 case FLOAT_EXTEND:
1477 case FLOAT_TRUNCATE:
1478 *total = cost_data->fp_add;
1479 return false;
1480
1481 default:
1482 return false;
1483 }
1484 }
1485 \f
1486 /* REF is an alignable memory location. Place an aligned SImode
1487 reference into *PALIGNED_MEM and the number of bits to shift into
1488 *PBITNUM. SCRATCH is a free register for use in reloading out
1489 of range stack slots. */
1490
1491 void
1492 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1493 {
1494 rtx base;
1495 HOST_WIDE_INT offset = 0;
1496
1497 gcc_assert (GET_CODE (ref) == MEM);
1498
1499 if (reload_in_progress
1500 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1501 {
1502 base = find_replacement (&XEXP (ref, 0));
1503
1504 gcc_assert (memory_address_p (GET_MODE (ref), base));
1505 }
1506 else
1507 base = XEXP (ref, 0);
1508
1509 if (GET_CODE (base) == PLUS)
1510 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1511
1512 *paligned_mem
1513 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
1514
1515 if (WORDS_BIG_ENDIAN)
1516 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
1517 + (offset & 3) * 8));
1518 else
1519 *pbitnum = GEN_INT ((offset & 3) * 8);
1520 }
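/* Editorial illustration (not from the original source): for an HImode
   reference at (plus base (const_int 6)), OFFSET is 6, so *PALIGNED_MEM
   becomes the aligned SImode word at byte offset 4 (adjustment
   (6 & ~3) - 6 = -2) and *PBITNUM is (6 & 3) * 8 = 16: in the usual
   little-endian configuration the halfword occupies bits 16..31 of that
   longword.  */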
1521
1522 /* Similar, but just get the address. Handle the two reload cases.
1523 Add EXTRA_OFFSET to the address we return. */
1524
1525 rtx
1526 get_unaligned_address (rtx ref, int extra_offset)
1527 {
1528 rtx base;
1529 HOST_WIDE_INT offset = 0;
1530
1531 gcc_assert (GET_CODE (ref) == MEM);
1532
1533 if (reload_in_progress
1534 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1535 {
1536 base = find_replacement (&XEXP (ref, 0));
1537
1538 gcc_assert (memory_address_p (GET_MODE (ref), base));
1539 }
1540 else
1541 base = XEXP (ref, 0);
1542
1543 if (GET_CODE (base) == PLUS)
1544 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1545
1546 return plus_constant (base, offset + extra_offset);
1547 }
1548
1549 /* On the Alpha, all (non-symbolic) constants except zero go into
1550 a floating-point register via memory. Note that we cannot
1551 return anything that is not a subset of CLASS, and that some
1552 symbolic constants cannot be dropped to memory. */
1553
1554 enum reg_class
1555 alpha_preferred_reload_class(rtx x, enum reg_class class)
1556 {
1557 /* Zero is present in any register class. */
1558 if (x == CONST0_RTX (GET_MODE (x)))
1559 return class;
1560
1561 /* These sorts of constants we can easily drop to memory. */
1562 if (GET_CODE (x) == CONST_INT
1563 || GET_CODE (x) == CONST_DOUBLE
1564 || GET_CODE (x) == CONST_VECTOR)
1565 {
1566 if (class == FLOAT_REGS)
1567 return NO_REGS;
1568 if (class == ALL_REGS)
1569 return GENERAL_REGS;
1570 return class;
1571 }
1572
1573 /* All other kinds of constants should not (and in the case of HIGH
1574 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1575 secondary reload. */
1576 if (CONSTANT_P (x))
1577 return (class == ALL_REGS ? GENERAL_REGS : class);
1578
1579 return class;
1580 }
1581
1582 /* Loading and storing HImode or QImode values to and from memory
1583 usually requires a scratch register. The exceptions are loading
1584 QImode and HImode from an aligned address to a general register
1585 unless byte instructions are permitted.
1586
1587 We also cannot load an unaligned address or a paradoxical SUBREG
1588 into an FP register.
1589
1590 We also cannot do integral arithmetic into FP regs, as might result
1591 from register elimination into a DImode fp register. */
1592
1593 enum reg_class
1594 secondary_reload_class (enum reg_class class, enum machine_mode mode,
1595 rtx x, int in)
1596 {
1597 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1598 {
1599 if (GET_CODE (x) == MEM
1600 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1601 || (GET_CODE (x) == SUBREG
1602 && (GET_CODE (SUBREG_REG (x)) == MEM
1603 || (GET_CODE (SUBREG_REG (x)) == REG
1604 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1605 {
1606 if (!in || !aligned_memory_operand(x, mode))
1607 return GENERAL_REGS;
1608 }
1609 }
1610
1611 if (class == FLOAT_REGS)
1612 {
1613 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1614 return GENERAL_REGS;
1615
1616 if (GET_CODE (x) == SUBREG
1617 && (GET_MODE_SIZE (GET_MODE (x))
1618 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1619 return GENERAL_REGS;
1620
1621 if (in && INTEGRAL_MODE_P (mode)
1622 && ! (memory_operand (x, mode) || x == const0_rtx))
1623 return GENERAL_REGS;
1624 }
1625
1626 return NO_REGS;
1627 }
1628 \f
1629 /* Subfunction of the following function. Update the flags of any MEM
1630 found in part of X. */
1631
1632 static int
1633 alpha_set_memflags_1 (rtx *xp, void *data)
1634 {
1635 rtx x = *xp, orig = (rtx) data;
1636
1637 if (GET_CODE (x) != MEM)
1638 return 0;
1639
1640 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1641 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1642 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1643 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1644 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1645
1646 /* Sadly, we cannot use alias sets because the extra aliasing
1647 produced by the AND interferes. Given that two-byte quantities
1648 are the only thing we would be able to differentiate anyway,
1649 there does not seem to be any point in convoluting the early
1650 out of the alias check. */
1651
1652 return -1;
1653 }
1654
1655 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1656 generated to perform a memory operation, look for any MEMs in either
1657 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1658 notrap and readonly flags from REF into each of the MEMs found. If REF is not
1659 a MEM, don't do anything. */
1660
1661 void
1662 alpha_set_memflags (rtx insn, rtx ref)
1663 {
1664 rtx *base_ptr;
1665
1666 if (GET_CODE (ref) != MEM)
1667 return;
1668
1669 /* This is only called from alpha.md, after having had something
1670 generated from one of the insn patterns. So if everything is
1671 zero, the pattern is already up-to-date. */
1672 if (!MEM_VOLATILE_P (ref)
1673 && !MEM_IN_STRUCT_P (ref)
1674 && !MEM_SCALAR_P (ref)
1675 && !MEM_NOTRAP_P (ref)
1676 && !MEM_READONLY_P (ref))
1677 return;
1678
1679 if (INSN_P (insn))
1680 base_ptr = &PATTERN (insn);
1681 else
1682 base_ptr = &insn;
1683 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1684 }
1685 \f
1686 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1687 int, bool);
1688
1689 /* Internal routine for alpha_emit_set_const to check for N or fewer insns.
1690 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1691 and return pc_rtx if successful. */
1692
1693 static rtx
1694 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1695 HOST_WIDE_INT c, int n, bool no_output)
1696 {
1697 HOST_WIDE_INT new;
1698 int i, bits;
1699 /* Use a pseudo if highly optimizing and still generating RTL. */
1700 rtx subtarget
1701 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1702 rtx temp, insn;
1703
1704 /* If this is a sign-extended 32-bit constant, we can do this in at most
1705 three insns, so do it if we have enough insns left. We always have
1706 a sign-extended 32-bit constant when compiling on a narrow machine. */
1707
1708 if (HOST_BITS_PER_WIDE_INT != 64
1709 || c >> 31 == -1 || c >> 31 == 0)
1710 {
1711 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1712 HOST_WIDE_INT tmp1 = c - low;
1713 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1714 HOST_WIDE_INT extra = 0;
1715
1716 /* If HIGH will be interpreted as negative but the constant is
1717 positive, we must adjust it to do two ldha insns. */
1718
1719 if ((high & 0x8000) != 0 && c >= 0)
1720 {
1721 extra = 0x4000;
1722 tmp1 -= 0x40000000;
1723 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1724 }
1725
1726 if (c == low || (low == 0 && extra == 0))
1727 {
1728 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1729 but that meant that we can't handle INT_MIN on 32-bit machines
1730 (like NT/Alpha), because we recurse indefinitely through
1731 emit_move_insn to gen_movdi. So instead, since we know exactly
1732 what we want, create it explicitly. */
1733
1734 if (no_output)
1735 return pc_rtx;
1736 if (target == NULL)
1737 target = gen_reg_rtx (mode);
1738 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1739 return target;
1740 }
1741 else if (n >= 2 + (extra != 0))
1742 {
1743 if (no_output)
1744 return pc_rtx;
1745 if (no_new_pseudos)
1746 {
1747 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1748 temp = target;
1749 }
1750 else
1751 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1752 subtarget, mode);
1753
1754 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1755 This means that if we go through expand_binop, we'll try to
1756 generate extensions, etc, which will require new pseudos, which
1757 will fail during some split phases. The SImode add patterns
1758 still exist, but are not named. So build the insns by hand. */
1759
1760 if (extra != 0)
1761 {
1762 if (! subtarget)
1763 subtarget = gen_reg_rtx (mode);
1764 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1765 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1766 emit_insn (insn);
1767 temp = subtarget;
1768 }
1769
1770 if (target == NULL)
1771 target = gen_reg_rtx (mode);
1772 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1773 insn = gen_rtx_SET (VOIDmode, target, insn);
1774 emit_insn (insn);
1775 return target;
1776 }
1777 }
1778
1779 /* If we couldn't do it that way, try some other methods. But if we have
1780 no instructions left, don't bother. Likewise, if this is SImode and
1781 we can't make pseudos, we can't do anything since the expand_binop
1782 and expand_unop calls will widen and try to make pseudos. */
1783
1784 if (n == 1 || (mode == SImode && no_new_pseudos))
1785 return 0;
1786
1787 /* Next, see if we can load a related constant and then shift and possibly
1788 negate it to get the constant we want. Try this once each increasing
1789 numbers of insns. */
1790
1791 for (i = 1; i < n; i++)
1792 {
1793 /* First, see if, minus some low bits, we have an easy load of
1794 the high bits. */
1795
1796 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1797 if (new != 0)
1798 {
1799 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1800 if (temp)
1801 {
1802 if (no_output)
1803 return temp;
1804 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1805 target, 0, OPTAB_WIDEN);
1806 }
1807 }
1808
1809 /* Next try complementing. */
1810 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1811 if (temp)
1812 {
1813 if (no_output)
1814 return temp;
1815 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1816 }
1817
1818 /* Next try to form a constant and do a left shift. We can do this
1819 if some low-order bits are zero; the exact_log2 call below tells
1820 us that information. The bits we are shifting out could be any
1821 value, but here we'll just try the 0- and sign-extended forms of
1822 the constant. To try to increase the chance of having the same
1823 constant in more than one insn, start at the highest number of
1824 bits to shift, but try all possibilities in case a ZAPNOT will
1825 be useful. */
1826
1827 bits = exact_log2 (c & -c);
1828 if (bits > 0)
1829 for (; bits > 0; bits--)
1830 {
1831 new = c >> bits;
1832 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1833 if (!temp && c < 0)
1834 {
1835 new = (unsigned HOST_WIDE_INT)c >> bits;
1836 temp = alpha_emit_set_const (subtarget, mode, new,
1837 i, no_output);
1838 }
1839 if (temp)
1840 {
1841 if (no_output)
1842 return temp;
1843 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1844 target, 0, OPTAB_WIDEN);
1845 }
1846 }
1847
1848 /* Now try high-order zero bits. Here we try the shifted-in bits as
1849 all zero and all ones. Be careful to avoid shifting outside the
1850 mode and to avoid shifting outside the host wide int size. */
1851 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1852 confuse the recursive call and set all of the high 32 bits. */
1853
1854 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1855 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1856 if (bits > 0)
1857 for (; bits > 0; bits--)
1858 {
1859 new = c << bits;
1860 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1861 if (!temp)
1862 {
1863 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1864 temp = alpha_emit_set_const (subtarget, mode, new,
1865 i, no_output);
1866 }
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1872 target, 1, OPTAB_WIDEN);
1873 }
1874 }
1875
1876 /* Now try high-order 1 bits. We get that with a sign-extension.
1877 But one bit isn't enough here. Be careful to avoid shifting outside
1878 the mode and to avoid shifting outside the host wide int size. */
1879
1880 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1881 - floor_log2 (~ c) - 2);
1882 if (bits > 0)
1883 for (; bits > 0; bits--)
1884 {
1885 new = c << bits;
1886 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1887 if (!temp)
1888 {
1889 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1890 temp = alpha_emit_set_const (subtarget, mode, new,
1891 i, no_output);
1892 }
1893 if (temp)
1894 {
1895 if (no_output)
1896 return temp;
1897 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1898 target, 0, OPTAB_WIDEN);
1899 }
1900 }
1901 }
1902
1903 #if HOST_BITS_PER_WIDE_INT == 64
1904 /* Finally, see if we can load a value into the target that is the same as the
1905 constant except that all bytes that are 0 are changed to be 0xff. If we
1906 can, then we can do a ZAPNOT to obtain the desired constant. */
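  /* Hedged example (added for clarity): for c = 0xffffffff00ffffff the
     single zero byte is filled in, giving new = -1, which loads in one
     insn; ANDing with the mask (c | ~new), here just c, clears that
     byte again, and since every byte of the mask is 0x00 or 0xff the
     AND can be emitted as a ZAPNOT.  */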
1907
1908 new = c;
1909 for (i = 0; i < 64; i += 8)
1910 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1911 new |= (HOST_WIDE_INT) 0xff << i;
1912
1913 /* We are only called for SImode and DImode. If this is SImode, ensure that
1914 we are sign extended to a full word. */
1915
1916 if (mode == SImode)
1917 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1918
1919 if (new != c)
1920 {
1921 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1922 if (temp)
1923 {
1924 if (no_output)
1925 return temp;
1926 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1927 target, 0, OPTAB_WIDEN);
1928 }
1929 }
1930 #endif
1931
1932 return 0;
1933 }
1934
1935 /* Try to output insns to set TARGET equal to the constant C if it can be
1936 done in N insns or fewer. Do all computations in MODE. Returns the place
1937 where the output has been placed if it can be done and the insns have been
1938 emitted. If it would take more than N insns, zero is returned and no
1939 insns are emitted. */
1940
1941 static rtx
1942 alpha_emit_set_const (rtx target, enum machine_mode mode,
1943 HOST_WIDE_INT c, int n, bool no_output)
1944 {
1945 enum machine_mode orig_mode = mode;
1946 rtx orig_target = target;
1947 rtx result = 0;
1948 int i;
1949
1950 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1951 can't load this constant in one insn, do this in DImode. */
1952 if (no_new_pseudos && mode == SImode
1953 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1954 {
1955 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1956 if (result)
1957 return result;
1958
1959 target = no_output ? NULL : gen_lowpart (DImode, target);
1960 mode = DImode;
1961 }
1962 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1963 {
1964 target = no_output ? NULL : gen_lowpart (DImode, target);
1965 mode = DImode;
1966 }
1967
1968 /* Try 1 insn, then 2, then up to N. */
1969 for (i = 1; i <= n; i++)
1970 {
1971 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1972 if (result)
1973 {
1974 rtx insn, set;
1975
1976 if (no_output)
1977 return result;
1978
1979 insn = get_last_insn ();
1980 set = single_set (insn);
1981 if (! CONSTANT_P (SET_SRC (set)))
1982 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1983 break;
1984 }
1985 }
1986
1987 /* Allow for the case where we changed the mode of TARGET. */
1988 if (result)
1989 {
1990 if (result == target)
1991 result = orig_target;
1992 else if (mode != orig_mode)
1993 result = gen_lowpart (orig_mode, result);
1994 }
1995
1996 return result;
1997 }
1998
1999 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2000 fall back to a straightforward decomposition. We do this to avoid
2001 exponential run times encountered when looking for longer sequences
2002 with alpha_emit_set_const. */
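/* Worked example (illustrative only): c = 0x1234567876543210 decomposes
   into d1 = 0x3210, d2 = 0x76540000, d3 = 0x5678 and d4 = 0x12340000,
   yielding a five-insn sequence: LDAH/LDA to build the high word, an
   SLL by 32, then LDAH/LDA to add in the low word.  */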
2003
2004 static rtx
2005 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2006 {
2007 HOST_WIDE_INT d1, d2, d3, d4;
2008
2009 /* Decompose the entire word */
2010 #if HOST_BITS_PER_WIDE_INT >= 64
2011 gcc_assert (c2 == -(c1 < 0));
2012 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2013 c1 -= d1;
2014 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2015 c1 = (c1 - d2) >> 32;
2016 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2017 c1 -= d3;
2018 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 gcc_assert (c1 == d4);
2020 #else
2021 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2022 c1 -= d1;
2023 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2024 gcc_assert (c1 == d2);
2025 c2 += (d2 < 0);
2026 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2027 c2 -= d3;
2028 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2029 gcc_assert (c2 == d4);
2030 #endif
2031
2032 /* Construct the high word */
2033 if (d4)
2034 {
2035 emit_move_insn (target, GEN_INT (d4));
2036 if (d3)
2037 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2038 }
2039 else
2040 emit_move_insn (target, GEN_INT (d3));
2041
2042 /* Shift it into place */
2043 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2044
2045 /* Add in the low bits. */
2046 if (d2)
2047 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2048 if (d1)
2049 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2050
2051 return target;
2052 }
2053
2054 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2055 the low 64 bits. */
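/* Descriptive note (added): on a 64-bit host *P0 receives the whole
   value and *P1 its sign extension; on a 32-bit host *P0 and *P1
   receive the low and high halves respectively.  */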
2056
2057 static void
2058 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2059 {
2060 HOST_WIDE_INT i0, i1;
2061
2062 if (GET_CODE (x) == CONST_VECTOR)
2063 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2064
2065
2066 if (GET_CODE (x) == CONST_INT)
2067 {
2068 i0 = INTVAL (x);
2069 i1 = -(i0 < 0);
2070 }
2071 else if (HOST_BITS_PER_WIDE_INT >= 64)
2072 {
2073 i0 = CONST_DOUBLE_LOW (x);
2074 i1 = -(i0 < 0);
2075 }
2076 else
2077 {
2078 i0 = CONST_DOUBLE_LOW (x);
2079 i1 = CONST_DOUBLE_HIGH (x);
2080 }
2081
2082 *p0 = i0;
2083 *p1 = i1;
2084 }
2085
2086 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2087 are willing to load the value into a register via a move pattern.
2088 Normally this is all symbolic constants, integral constants that
2089 take three or fewer instructions, and floating-point zero. */
2090
2091 bool
2092 alpha_legitimate_constant_p (rtx x)
2093 {
2094 enum machine_mode mode = GET_MODE (x);
2095 HOST_WIDE_INT i0, i1;
2096
2097 switch (GET_CODE (x))
2098 {
2099 case CONST:
2100 case LABEL_REF:
2101 case SYMBOL_REF:
2102 case HIGH:
2103 return true;
2104
2105 case CONST_DOUBLE:
2106 if (x == CONST0_RTX (mode))
2107 return true;
2108 if (FLOAT_MODE_P (mode))
2109 return false;
2110 goto do_integer;
2111
2112 case CONST_VECTOR:
2113 if (x == CONST0_RTX (mode))
2114 return true;
2115 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2116 return false;
2117 if (GET_MODE_SIZE (mode) != 8)
2118 return false;
2119 goto do_integer;
2120
2121 case CONST_INT:
2122 do_integer:
2123 if (TARGET_BUILD_CONSTANTS)
2124 return true;
2125 alpha_extract_integer (x, &i0, &i1);
2126 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2127 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2128 return false;
2129
2130 default:
2131 return false;
2132 }
2133 }
2134
2135 /* Operand 1 is known to be a constant, and should require more than one
2136 instruction to load. Emit that multi-part load. */
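/* That is (descriptive note, added): first try the bounded search in
   alpha_emit_set_const, which allows at most three insns, and only
   fall back to the straightforward alpha_emit_set_long_const
   decomposition when TARGET_BUILD_CONSTANTS permits it.  */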
2137
2138 bool
2139 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2140 {
2141 HOST_WIDE_INT i0, i1;
2142 rtx temp = NULL_RTX;
2143
2144 alpha_extract_integer (operands[1], &i0, &i1);
2145
2146 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2147 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2148
2149 if (!temp && TARGET_BUILD_CONSTANTS)
2150 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2151
2152 if (temp)
2153 {
2154 if (!rtx_equal_p (operands[0], temp))
2155 emit_move_insn (operands[0], temp);
2156 return true;
2157 }
2158
2159 return false;
2160 }
2161
2162 /* Expand a move instruction; return true if all work is done.
2163 We don't handle non-bwx subword loads here. */
2164
2165 bool
2166 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2167 {
2168 /* If the output is not a register, the input must be. */
2169 if (GET_CODE (operands[0]) == MEM
2170 && ! reg_or_0_operand (operands[1], mode))
2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Allow legitimize_address to perform some simplifications. */
2174 if (mode == Pmode && symbolic_operand (operands[1], mode))
2175 {
2176 rtx tmp;
2177
2178 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2179 if (tmp)
2180 {
2181 if (tmp == operands[0])
2182 return true;
2183 operands[1] = tmp;
2184 return false;
2185 }
2186 }
2187
2188 /* Early out for non-constants and valid constants. */
2189 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2190 return false;
2191
2192 /* Split large integers. */
2193 if (GET_CODE (operands[1]) == CONST_INT
2194 || GET_CODE (operands[1]) == CONST_DOUBLE
2195 || GET_CODE (operands[1]) == CONST_VECTOR)
2196 {
2197 if (alpha_split_const_mov (mode, operands))
2198 return true;
2199 }
2200
2201 /* Otherwise we've nothing left but to drop the thing to memory. */
2202 operands[1] = force_const_mem (mode, operands[1]);
2203 if (reload_in_progress)
2204 {
2205 emit_move_insn (operands[0], XEXP (operands[1], 0));
2206 operands[1] = copy_rtx (operands[1]);
2207 XEXP (operands[1], 0) = operands[0];
2208 }
2209 else
2210 operands[1] = validize_mem (operands[1]);
2211 return false;
2212 }
2213
2214 /* Expand a non-bwx QImode or HImode move instruction;
2215 return true if all work is done. */
2216
2217 bool
2218 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2219 {
2220 /* If the output is not a register, the input must be. */
2221 if (GET_CODE (operands[0]) == MEM)
2222 operands[1] = force_reg (mode, operands[1]);
2223
2224 /* Handle four memory cases, unaligned and aligned for either the input
2225 or the output. The only case where we can be called during reload is
2226 for aligned loads; all other cases require temporaries. */
2227
2228 if (GET_CODE (operands[1]) == MEM
2229 || (GET_CODE (operands[1]) == SUBREG
2230 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2231 || (reload_in_progress && GET_CODE (operands[1]) == REG
2232 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2233 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2234 && GET_CODE (SUBREG_REG (operands[1])) == REG
2235 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2236 {
2237 if (aligned_memory_operand (operands[1], mode))
2238 {
2239 if (reload_in_progress)
2240 {
2241 emit_insn ((mode == QImode
2242 ? gen_reload_inqi_help
2243 : gen_reload_inhi_help)
2244 (operands[0], operands[1],
2245 gen_rtx_REG (SImode, REGNO (operands[0]))));
2246 }
2247 else
2248 {
2249 rtx aligned_mem, bitnum;
2250 rtx scratch = gen_reg_rtx (SImode);
2251 rtx subtarget;
2252 bool copyout;
2253
2254 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2255
2256 subtarget = operands[0];
2257 if (GET_CODE (subtarget) == REG)
2258 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2259 else
2260 subtarget = gen_reg_rtx (DImode), copyout = true;
2261
2262 emit_insn ((mode == QImode
2263 ? gen_aligned_loadqi
2264 : gen_aligned_loadhi)
2265 (subtarget, aligned_mem, bitnum, scratch));
2266
2267 if (copyout)
2268 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2269 }
2270 }
2271 else
2272 {
2273 /* Don't pass these as parameters since that makes the generated
2274 code depend on parameter evaluation order, which will cause
2275 bootstrap failures. */
2276
2277 rtx temp1, temp2, seq, subtarget;
2278 bool copyout;
2279
2280 temp1 = gen_reg_rtx (DImode);
2281 temp2 = gen_reg_rtx (DImode);
2282
2283 subtarget = operands[0];
2284 if (GET_CODE (subtarget) == REG)
2285 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2286 else
2287 subtarget = gen_reg_rtx (DImode), copyout = true;
2288
2289 seq = ((mode == QImode
2290 ? gen_unaligned_loadqi
2291 : gen_unaligned_loadhi)
2292 (subtarget, get_unaligned_address (operands[1], 0),
2293 temp1, temp2));
2294 alpha_set_memflags (seq, operands[1]);
2295 emit_insn (seq);
2296
2297 if (copyout)
2298 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2299 }
2300 return true;
2301 }
2302
2303 if (GET_CODE (operands[0]) == MEM
2304 || (GET_CODE (operands[0]) == SUBREG
2305 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2306 || (reload_in_progress && GET_CODE (operands[0]) == REG
2307 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2308 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2309 && GET_CODE (SUBREG_REG (operands[0])) == REG
2310 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2311 {
2312 if (aligned_memory_operand (operands[0], mode))
2313 {
2314 rtx aligned_mem, bitnum;
2315 rtx temp1 = gen_reg_rtx (SImode);
2316 rtx temp2 = gen_reg_rtx (SImode);
2317
2318 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2319
2320 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2321 temp1, temp2));
2322 }
2323 else
2324 {
2325 rtx temp1 = gen_reg_rtx (DImode);
2326 rtx temp2 = gen_reg_rtx (DImode);
2327 rtx temp3 = gen_reg_rtx (DImode);
2328 rtx seq = ((mode == QImode
2329 ? gen_unaligned_storeqi
2330 : gen_unaligned_storehi)
2331 (get_unaligned_address (operands[0], 0),
2332 operands[1], temp1, temp2, temp3));
2333
2334 alpha_set_memflags (seq, operands[0]);
2335 emit_insn (seq);
2336 }
2337 return true;
2338 }
2339
2340 return false;
2341 }
2342
2343 /* Implement the movmisalign patterns. One of the operands is a memory
2344 that is not naturally aligned. Emit instructions to load it. */
2345
2346 void
2347 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2348 {
2349 /* Honor misaligned loads; these are the cases we promised to handle. */
2350 if (MEM_P (operands[1]))
2351 {
2352 rtx tmp;
2353
2354 if (register_operand (operands[0], mode))
2355 tmp = operands[0];
2356 else
2357 tmp = gen_reg_rtx (mode);
2358
2359 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2360 if (tmp != operands[0])
2361 emit_move_insn (operands[0], tmp);
2362 }
2363 else if (MEM_P (operands[0]))
2364 {
2365 if (!reg_or_0_operand (operands[1], mode))
2366 operands[1] = force_reg (mode, operands[1]);
2367 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2368 }
2369 else
2370 gcc_unreachable ();
2371 }
2372
2373 /* Generate an unsigned DImode to FP conversion. This is the same code
2374 optabs would emit if we didn't have TFmode patterns.
2375
2376 For SFmode, this is the only construction I've found that can pass
2377 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2378 intermediates will work, because you'll get intermediate rounding
2379 that ruins the end result. Some of this could be fixed by turning
2380 on round-to-positive-infinity, but that requires diddling the fpsr,
2381 which kills performance. I tried turning this around and converting
2382 to a negative number, so that I could turn on /m, but either I did
2383 it wrong or there's something else, because I wound up with the exact
2384 same single-bit error. There is a branch-less form of this same code:
2385
2386 srl $16,1,$1
2387 and $16,1,$2
2388 cmplt $16,0,$3
2389 or $1,$2,$2
2390 cmovge $16,$16,$2
2391 itoft $3,$f10
2392 itoft $2,$f11
2393 cvtqs $f11,$f11
2394 adds $f11,$f11,$f0
2395 fcmoveq $f10,$f11,$f0
2396
2397 I'm not using it because it's the same number of instructions as
2398 this branch-full form, and it has more serialized long latency
2399 instructions on the critical path.
2400
2401 For DFmode, we can avoid rounding errors by breaking up the word
2402 into two pieces, converting them separately, and adding them back:
2403
2404 LC0: .long 0,0x5f800000
2405
2406 itoft $16,$f11
2407 lda $2,LC0
2408 cmplt $16,0,$1
2409 cpyse $f11,$f31,$f10
2410 cpyse $f31,$f11,$f11
2411 s4addq $1,$2,$1
2412 lds $f12,0($1)
2413 cvtqt $f10,$f10
2414 cvtqt $f11,$f11
2415 addt $f12,$f10,$f0
2416 addt $f0,$f11,$f0
2417
2418 This doesn't seem to be a clear-cut win over the optabs form.
2419 It probably all depends on the distribution of numbers being
2420 converted -- in the optabs form, everything except the high-bit-set case has a
2421 much lower minimum execution time. */
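/* In C terms, the branch-full expansion emitted below is roughly
   (a descriptive sketch, added for clarity; the actual modes vary):

     if ((long) x >= 0)
       result = (double) (long) x;
     else
       {
         unsigned long t = (x >> 1) | (x & 1);
         result = (double) (long) t;
         result = result + result;
       }
*/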
2422
2423 void
2424 alpha_emit_floatuns (rtx operands[2])
2425 {
2426 rtx neglab, donelab, i0, i1, f0, in, out;
2427 enum machine_mode mode;
2428
2429 out = operands[0];
2430 in = force_reg (DImode, operands[1]);
2431 mode = GET_MODE (out);
2432 neglab = gen_label_rtx ();
2433 donelab = gen_label_rtx ();
2434 i0 = gen_reg_rtx (DImode);
2435 i1 = gen_reg_rtx (DImode);
2436 f0 = gen_reg_rtx (mode);
2437
2438 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2439
2440 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2441 emit_jump_insn (gen_jump (donelab));
2442 emit_barrier ();
2443
2444 emit_label (neglab);
2445
2446 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2447 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2448 emit_insn (gen_iordi3 (i0, i0, i1));
2449 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2450 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2451
2452 emit_label (donelab);
2453 }
2454
2455 /* Generate the comparison for a conditional branch. */
2456
2457 rtx
2458 alpha_emit_conditional_branch (enum rtx_code code)
2459 {
2460 enum rtx_code cmp_code, branch_code;
2461 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2462 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2463 rtx tem;
2464
2465 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2466 {
2467 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2468 op1 = const0_rtx;
2469 alpha_compare.fp_p = 0;
2470 }
2471
2472 /* The general case: fold the comparison code to the types of compares
2473 that we have, choosing the branch as necessary. */
2474 switch (code)
2475 {
2476 case EQ: case LE: case LT: case LEU: case LTU:
2477 case UNORDERED:
2478 /* We have these compares: */
2479 cmp_code = code, branch_code = NE;
2480 break;
2481
2482 case NE:
2483 case ORDERED:
2484 /* These must be reversed. */
2485 cmp_code = reverse_condition (code), branch_code = EQ;
2486 break;
2487
2488 case GE: case GT: case GEU: case GTU:
2489 /* For FP, we swap them, for INT, we reverse them. */
2490 if (alpha_compare.fp_p)
2491 {
2492 cmp_code = swap_condition (code);
2493 branch_code = NE;
2494 tem = op0, op0 = op1, op1 = tem;
2495 }
2496 else
2497 {
2498 cmp_code = reverse_condition (code);
2499 branch_code = EQ;
2500 }
2501 break;
2502
2503 default:
2504 gcc_unreachable ();
2505 }
2506
2507 if (alpha_compare.fp_p)
2508 {
2509 cmp_mode = DFmode;
2510 if (flag_unsafe_math_optimizations)
2511 {
2512 /* When we are not as concerned about non-finite values, and we
2513 are comparing against zero, we can branch directly. */
2514 if (op1 == CONST0_RTX (DFmode))
2515 cmp_code = UNKNOWN, branch_code = code;
2516 else if (op0 == CONST0_RTX (DFmode))
2517 {
2518 /* Undo the swap we probably did just above. */
2519 tem = op0, op0 = op1, op1 = tem;
2520 branch_code = swap_condition (cmp_code);
2521 cmp_code = UNKNOWN;
2522 }
2523 }
2524 else
2525 {
2526 /* ??? We mark the branch mode to be CCmode to prevent the
2527 compare and branch from being combined, since the compare
2528 insn follows IEEE rules that the branch does not. */
2529 branch_mode = CCmode;
2530 }
2531 }
2532 else
2533 {
2534 cmp_mode = DImode;
2535
2536 /* The following optimizations are only for signed compares. */
2537 if (code != LEU && code != LTU && code != GEU && code != GTU)
2538 {
2539 /* Whee. Compare and branch against 0 directly. */
2540 if (op1 == const0_rtx)
2541 cmp_code = UNKNOWN, branch_code = code;
2542
2543 /* If the constant doesn't fit into an immediate, but can
2544 be generated by lda/ldah, we adjust the argument and
2545 compare against zero, so we can use beq/bne directly. */
2546 /* ??? Don't do this when comparing against symbols, otherwise
2547 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2548 be declared false out of hand (at least for non-weak). */
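      /* Example (illustrative): for (x == 4096), 4096 is not a valid
         8-bit 'I' immediate, but -4096 fits the signed 16-bit 'K' range,
         so we emit lda t,-4096(x) and then branch with beq on t.  */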
2549 else if (GET_CODE (op1) == CONST_INT
2550 && (code == EQ || code == NE)
2551 && !(symbolic_operand (op0, VOIDmode)
2552 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2553 {
2554 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2555
2556 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2557 && (CONST_OK_FOR_LETTER_P (n, 'K')
2558 || CONST_OK_FOR_LETTER_P (n, 'L')))
2559 {
2560 cmp_code = PLUS, branch_code = code;
2561 op1 = GEN_INT (n);
2562 }
2563 }
2564 }
2565
2566 if (!reg_or_0_operand (op0, DImode))
2567 op0 = force_reg (DImode, op0);
2568 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2569 op1 = force_reg (DImode, op1);
2570 }
2571
2572 /* Emit an initial compare instruction, if necessary. */
2573 tem = op0;
2574 if (cmp_code != UNKNOWN)
2575 {
2576 tem = gen_reg_rtx (cmp_mode);
2577 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2578 }
2579
2580 /* Zero the operands. */
2581 memset (&alpha_compare, 0, sizeof (alpha_compare));
2582
2583 /* Return the branch comparison. */
2584 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2585 }
2586
2587 /* Certain simplifications can be done to make invalid setcc operations
2588 valid. Return the final comparison, or NULL if we can't work. */
2589
2590 rtx
2591 alpha_emit_setcc (enum rtx_code code)
2592 {
2593 enum rtx_code cmp_code;
2594 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2595 int fp_p = alpha_compare.fp_p;
2596 rtx tmp;
2597
2598 /* Zero the operands. */
2599 memset (&alpha_compare, 0, sizeof (alpha_compare));
2600
2601 if (fp_p && GET_MODE (op0) == TFmode)
2602 {
2603 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2604 op1 = const0_rtx;
2605 fp_p = 0;
2606 }
2607
2608 if (fp_p && !TARGET_FIX)
2609 return NULL_RTX;
2610
2611 /* The general case: fold the comparison code to the types of compares
2612 that we have, choosing the branch as necessary. */
2613
2614 cmp_code = UNKNOWN;
2615 switch (code)
2616 {
2617 case EQ: case LE: case LT: case LEU: case LTU:
2618 case UNORDERED:
2619 /* We have these compares. */
2620 if (fp_p)
2621 cmp_code = code, code = NE;
2622 break;
2623
2624 case NE:
2625 if (!fp_p && op1 == const0_rtx)
2626 break;
2627 /* FALLTHRU */
2628
2629 case ORDERED:
2630 cmp_code = reverse_condition (code);
2631 code = EQ;
2632 break;
2633
2634 case GE: case GT: case GEU: case GTU:
2635 /* These normally need swapping, but for integer zero we have
2636 special patterns that recognize swapped operands. */
2637 if (!fp_p && op1 == const0_rtx)
2638 break;
2639 code = swap_condition (code);
2640 if (fp_p)
2641 cmp_code = code, code = NE;
2642 tmp = op0, op0 = op1, op1 = tmp;
2643 break;
2644
2645 default:
2646 gcc_unreachable ();
2647 }
2648
2649 if (!fp_p)
2650 {
2651 if (!register_operand (op0, DImode))
2652 op0 = force_reg (DImode, op0);
2653 if (!reg_or_8bit_operand (op1, DImode))
2654 op1 = force_reg (DImode, op1);
2655 }
2656
2657 /* Emit an initial compare instruction, if necessary. */
2658 if (cmp_code != UNKNOWN)
2659 {
2660 enum machine_mode mode = fp_p ? DFmode : DImode;
2661
2662 tmp = gen_reg_rtx (mode);
2663 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2664 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2665
2666 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2667 op1 = const0_rtx;
2668 }
2669
2670 /* Return the setcc comparison. */
2671 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2672 }
2673
2674
2675 /* Rewrite a comparison against zero CMP of the form
2676 (CODE (cc0) (const_int 0)) so it can be written validly in
2677 a conditional move (if_then_else CMP ...).
2678 If both of the operands that set cc0 are nonzero we must emit
2679 an insn to perform the compare (it can't be done within
2680 the conditional move). */
2681
2682 rtx
2683 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2684 {
2685 enum rtx_code code = GET_CODE (cmp);
2686 enum rtx_code cmov_code = NE;
2687 rtx op0 = alpha_compare.op0;
2688 rtx op1 = alpha_compare.op1;
2689 int fp_p = alpha_compare.fp_p;
2690 enum machine_mode cmp_mode
2691 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2692 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2693 enum machine_mode cmov_mode = VOIDmode;
2694 int local_fast_math = flag_unsafe_math_optimizations;
2695 rtx tem;
2696
2697 /* Zero the operands. */
2698 memset (&alpha_compare, 0, sizeof (alpha_compare));
2699
2700 if (fp_p != FLOAT_MODE_P (mode))
2701 {
2702 enum rtx_code cmp_code;
2703
2704 if (! TARGET_FIX)
2705 return 0;
2706
2707 /* If we have fp<->int register move instructions, do a cmov by
2708 performing the comparison in fp registers, and move the
2709 zero/nonzero value to integer registers, where we can then
2710 use a normal cmov, or vice-versa. */
2711
2712 switch (code)
2713 {
2714 case EQ: case LE: case LT: case LEU: case LTU:
2715 /* We have these compares. */
2716 cmp_code = code, code = NE;
2717 break;
2718
2719 case NE:
2720 /* This must be reversed. */
2721 cmp_code = EQ, code = EQ;
2722 break;
2723
2724 case GE: case GT: case GEU: case GTU:
2725 /* These normally need swapping, but for integer zero we have
2726 special patterns that recognize swapped operands. */
2727 if (!fp_p && op1 == const0_rtx)
2728 cmp_code = code, code = NE;
2729 else
2730 {
2731 cmp_code = swap_condition (code);
2732 code = NE;
2733 tem = op0, op0 = op1, op1 = tem;
2734 }
2735 break;
2736
2737 default:
2738 gcc_unreachable ();
2739 }
2740
2741 tem = gen_reg_rtx (cmp_op_mode);
2742 emit_insn (gen_rtx_SET (VOIDmode, tem,
2743 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2744 op0, op1)));
2745
2746 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2747 op0 = gen_lowpart (cmp_op_mode, tem);
2748 op1 = CONST0_RTX (cmp_op_mode);
2749 fp_p = !fp_p;
2750 local_fast_math = 1;
2751 }
2752
2753 /* We may be able to use a conditional move directly.
2754 This avoids emitting spurious compares. */
2755 if (signed_comparison_operator (cmp, VOIDmode)
2756 && (!fp_p || local_fast_math)
2757 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2758 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2759
2760 /* We can't put the comparison inside the conditional move;
2761 emit a compare instruction and put that inside the
2762 conditional move. Make sure we emit only comparisons we have;
2763 swap or reverse as necessary. */
2764
2765 if (no_new_pseudos)
2766 return NULL_RTX;
2767
2768 switch (code)
2769 {
2770 case EQ: case LE: case LT: case LEU: case LTU:
2771 /* We have these compares: */
2772 break;
2773
2774 case NE:
2775 /* This must be reversed. */
2776 code = reverse_condition (code);
2777 cmov_code = EQ;
2778 break;
2779
2780 case GE: case GT: case GEU: case GTU:
2781 /* These must be swapped. */
2782 if (op1 != CONST0_RTX (cmp_mode))
2783 {
2784 code = swap_condition (code);
2785 tem = op0, op0 = op1, op1 = tem;
2786 }
2787 break;
2788
2789 default:
2790 gcc_unreachable ();
2791 }
2792
2793 if (!fp_p)
2794 {
2795 if (!reg_or_0_operand (op0, DImode))
2796 op0 = force_reg (DImode, op0);
2797 if (!reg_or_8bit_operand (op1, DImode))
2798 op1 = force_reg (DImode, op1);
2799 }
2800
2801 /* ??? We mark the branch mode to be CCmode to prevent the compare
2802 and cmov from being combined, since the compare insn follows IEEE
2803 rules that the cmov does not. */
2804 if (fp_p && !local_fast_math)
2805 cmov_mode = CCmode;
2806
2807 tem = gen_reg_rtx (cmp_op_mode);
2808 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2809 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2810 }
2811
2812 /* Simplify a conditional move of two constants into a setcc with
2813 arithmetic. This is done with a splitter since combine would
2814 just undo the work if done during code generation. It also catches
2815 cases we wouldn't have before cse. */
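/* For instance (illustrative only): (cond ? 8 : 0) becomes a setcc
   shifted left by 3, and (cond ? 5 : 1) becomes setcc * 4 + 1 (an
   s4addq); a (cond ? -1 : 0) pair is handled with a negation or an
   add of -1, depending on how the condition is canonicalized below.  */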
2816
2817 int
2818 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2819 rtx t_rtx, rtx f_rtx)
2820 {
2821 HOST_WIDE_INT t, f, diff;
2822 enum machine_mode mode;
2823 rtx target, subtarget, tmp;
2824
2825 mode = GET_MODE (dest);
2826 t = INTVAL (t_rtx);
2827 f = INTVAL (f_rtx);
2828 diff = t - f;
2829
2830 if (((code == NE || code == EQ) && diff < 0)
2831 || (code == GE || code == GT))
2832 {
2833 code = reverse_condition (code);
2834 diff = t, t = f, f = diff;
2835 diff = t - f;
2836 }
2837
2838 subtarget = target = dest;
2839 if (mode != DImode)
2840 {
2841 target = gen_lowpart (DImode, dest);
2842 if (! no_new_pseudos)
2843 subtarget = gen_reg_rtx (DImode);
2844 else
2845 subtarget = target;
2846 }
2847 /* Below, we must be careful to use copy_rtx on target and subtarget
2848 in intermediate insns, as they may be a subreg rtx, which may not
2849 be shared. */
2850
2851 if (f == 0 && exact_log2 (diff) > 0
2852 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2853 viable over a longer latency cmove. On EV5, the E0 slot is a
2854 scarce resource, and on EV4 shift has the same latency as a cmove. */
2855 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2856 {
2857 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2858 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2859
2860 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2861 GEN_INT (exact_log2 (t)));
2862 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2863 }
2864 else if (f == 0 && t == -1)
2865 {
2866 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2867 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2868
2869 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2870 }
2871 else if (diff == 1 || diff == 4 || diff == 8)
2872 {
2873 rtx add_op;
2874
2875 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2876 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2877
2878 if (diff == 1)
2879 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2880 else
2881 {
2882 add_op = GEN_INT (f);
2883 if (sext_add_operand (add_op, mode))
2884 {
2885 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2886 GEN_INT (diff));
2887 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2888 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2889 }
2890 else
2891 return 0;
2892 }
2893 }
2894 else
2895 return 0;
2896
2897 return 1;
2898 }
2899 \f
2900 /* Look up the X_floating library function name for the
2901 given operation. */
2902
2903 struct xfloating_op GTY(())
2904 {
2905 const enum rtx_code code;
2906 const char *const GTY((skip)) osf_func;
2907 const char *const GTY((skip)) vms_func;
2908 rtx libcall;
2909 };
2910
2911 static GTY(()) struct xfloating_op xfloating_ops[] =
2912 {
2913 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2914 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2915 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2916 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2917 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2918 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2919 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2920 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2921 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2922 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2923 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2924 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2925 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2926 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2927 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2928 };
2929
2930 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2931 {
2932 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2933 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2934 };
2935
2936 static rtx
2937 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2938 {
2939 struct xfloating_op *ops = xfloating_ops;
2940 long n = ARRAY_SIZE (xfloating_ops);
2941 long i;
2942
2943 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2944
2945 /* How irritating. Nothing to key off for the main table. */
2946 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2947 {
2948 ops = vax_cvt_ops;
2949 n = ARRAY_SIZE (vax_cvt_ops);
2950 }
2951
2952 for (i = 0; i < n; ++i, ++ops)
2953 if (ops->code == code)
2954 {
2955 rtx func = ops->libcall;
2956 if (!func)
2957 {
2958 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2959 ? ops->vms_func : ops->osf_func);
2960 ops->libcall = func;
2961 }
2962 return func;
2963 }
2964
2965 gcc_unreachable ();
2966 }
2967
2968 /* Most X_floating operations take the rounding mode as an argument.
2969 Compute that here. */
2970
2971 static int
2972 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2973 enum alpha_fp_rounding_mode round)
2974 {
2975 int mode;
2976
2977 switch (round)
2978 {
2979 case ALPHA_FPRM_NORM:
2980 mode = 2;
2981 break;
2982 case ALPHA_FPRM_MINF:
2983 mode = 1;
2984 break;
2985 case ALPHA_FPRM_CHOP:
2986 mode = 0;
2987 break;
2988 case ALPHA_FPRM_DYN:
2989 mode = 4;
2990 break;
2991 default:
2992 gcc_unreachable ();
2993
2994 /* XXX For reference, round to +inf is mode = 3. */
2995 }
2996
2997 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2998 mode |= 0x10000;
2999
3000 return mode;
3001 }
3002
3003 /* Emit an X_floating library function call.
3004
3005 Note that these functions do not follow normal calling conventions:
3006 TFmode arguments are passed in two integer registers (as opposed to
3007 indirect); TFmode return values appear in R16+R17.
3008
3009 FUNC is the function to call.
3010 TARGET is where the output belongs.
3011 OPERANDS are the inputs.
3012 NOPERANDS is the count of inputs.
3013 EQUIV is the expression equivalent for the function.
3014 */
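/* For example (descriptive of the loop below): an _OtsAddX call takes
   its two TFmode operands in $16-$17 and $18-$19, the rounding-mode
   argument in $20, and returns the TFmode result in $16-$17.  */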
3015
3016 static void
3017 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3018 int noperands, rtx equiv)
3019 {
3020 rtx usage = NULL_RTX, tmp, reg;
3021 int regno = 16, i;
3022
3023 start_sequence ();
3024
3025 for (i = 0; i < noperands; ++i)
3026 {
3027 switch (GET_MODE (operands[i]))
3028 {
3029 case TFmode:
3030 reg = gen_rtx_REG (TFmode, regno);
3031 regno += 2;
3032 break;
3033
3034 case DFmode:
3035 reg = gen_rtx_REG (DFmode, regno + 32);
3036 regno += 1;
3037 break;
3038
3039 case VOIDmode:
3040 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
3041 /* FALLTHRU */
3042 case DImode:
3043 reg = gen_rtx_REG (DImode, regno);
3044 regno += 1;
3045 break;
3046
3047 default:
3048 gcc_unreachable ();
3049 }
3050
3051 emit_move_insn (reg, operands[i]);
3052 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3053 }
3054
3055 switch (GET_MODE (target))
3056 {
3057 case TFmode:
3058 reg = gen_rtx_REG (TFmode, 16);
3059 break;
3060 case DFmode:
3061 reg = gen_rtx_REG (DFmode, 32);
3062 break;
3063 case DImode:
3064 reg = gen_rtx_REG (DImode, 0);
3065 break;
3066 default:
3067 gcc_unreachable ();
3068 }
3069
3070 tmp = gen_rtx_MEM (QImode, func);
3071 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3072 const0_rtx, const0_rtx));
3073 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3074 CONST_OR_PURE_CALL_P (tmp) = 1;
3075
3076 tmp = get_insns ();
3077 end_sequence ();
3078
3079 emit_libcall_block (tmp, target, reg, equiv);
3080 }
3081
3082 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3083
3084 void
3085 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3086 {
3087 rtx func;
3088 int mode;
3089 rtx out_operands[3];
3090
3091 func = alpha_lookup_xfloating_lib_func (code);
3092 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3093
3094 out_operands[0] = operands[1];
3095 out_operands[1] = operands[2];
3096 out_operands[2] = GEN_INT (mode);
3097 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3098 gen_rtx_fmt_ee (code, TFmode, operands[1],
3099 operands[2]));
3100 }
3101
3102 /* Emit an X_floating library function call for a comparison. */
3103
3104 static rtx
3105 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3106 {
3107 enum rtx_code cmp_code, res_code;
3108 rtx func, out, operands[2];
3109
3110 /* X_floating library comparison functions return
3111 -1 unordered
3112 0 false
3113 1 true
3114 Convert the compare against the raw return value. */
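  /* For instance, UNORDERED is handled by calling the EQ routine
     (_OtsEqlX) and then testing the raw result with "< 0", since only
     an unordered pair of operands yields -1 (descriptive note, added).  */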
3115
3116 cmp_code = *pcode;
3117 switch (cmp_code)
3118 {
3119 case UNORDERED:
3120 cmp_code = EQ;
3121 res_code = LT;
3122 break;
3123 case ORDERED:
3124 cmp_code = EQ;
3125 res_code = GE;
3126 break;
3127 case NE:
3128 res_code = NE;
3129 break;
3130 case EQ:
3131 case LT:
3132 case GT:
3133 case LE:
3134 case GE:
3135 res_code = GT;
3136 break;
3137 default:
3138 gcc_unreachable ();
3139 }
3140 *pcode = res_code;
3141
3142 func = alpha_lookup_xfloating_lib_func (cmp_code);
3143
3144 operands[0] = op0;
3145 operands[1] = op1;
3146 out = gen_reg_rtx (DImode);
3147
3148 /* ??? Strange mode for equiv because what's actually returned
3149 is -1,0,1, not a proper boolean value. */
3150 alpha_emit_xfloating_libcall (func, out, operands, 2,
3151 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3152
3153 return out;
3154 }
3155
3156 /* Emit an X_floating library function call for a conversion. */
3157
3158 void
3159 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3160 {
3161 int noperands = 1, mode;
3162 rtx out_operands[2];
3163 rtx func;
3164 enum rtx_code code = orig_code;
3165
3166 if (code == UNSIGNED_FIX)
3167 code = FIX;
3168
3169 func = alpha_lookup_xfloating_lib_func (code);
3170
3171 out_operands[0] = operands[1];
3172
3173 switch (code)
3174 {
3175 case FIX:
3176 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3177 out_operands[1] = GEN_INT (mode);
3178 noperands = 2;
3179 break;
3180 case FLOAT_TRUNCATE:
3181 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3182 out_operands[1] = GEN_INT (mode);
3183 noperands = 2;
3184 break;
3185 default:
3186 break;
3187 }
3188
3189 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3190 gen_rtx_fmt_e (orig_code,
3191 GET_MODE (operands[0]),
3192 operands[1]));
3193 }
3194
3195 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3196 OP[0] into OP[0,1]. Naturally, output operand ordering is
3197 little-endian. */
3198
3199 void
3200 alpha_split_tfmode_pair (rtx operands[4])
3201 {
3202 switch (GET_CODE (operands[1]))
3203 {
3204 case REG:
3205 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3206 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3207 break;
3208
3209 case MEM:
3210 operands[3] = adjust_address (operands[1], DImode, 8);
3211 operands[2] = adjust_address (operands[1], DImode, 0);
3212 break;
3213
3214 case CONST_DOUBLE:
3215 gcc_assert (operands[1] == CONST0_RTX (TFmode));
3216 operands[2] = operands[3] = const0_rtx;
3217 break;
3218
3219 default:
3220 gcc_unreachable ();
3221 }
3222
3223 switch (GET_CODE (operands[0]))
3224 {
3225 case REG:
3226 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3227 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3228 break;
3229
3230 case MEM:
3231 operands[1] = adjust_address (operands[0], DImode, 8);
3232 operands[0] = adjust_address (operands[0], DImode, 0);
3233 break;
3234
3235 default:
3236 gcc_unreachable ();
3237 }
3238 }
3239
3240 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3241 op2 is a register containing the sign bit, operation is the
3242 logical operation to be performed. */
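/* Descriptive note (added): after splitting, only the DImode half that
   carries the TFmode sign bit is actually combined with OP2; the other
   half is simply copied, or left in place when it already overlaps the
   source.  */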
3243
3244 void
3245 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3246 {
3247 rtx high_bit = operands[2];
3248 rtx scratch;
3249 int move;
3250
3251 alpha_split_tfmode_pair (operands);
3252
3253 /* Detect three flavors of operand overlap. */
3254 move = 1;
3255 if (rtx_equal_p (operands[0], operands[2]))
3256 move = 0;
3257 else if (rtx_equal_p (operands[1], operands[2]))
3258 {
3259 if (rtx_equal_p (operands[0], high_bit))
3260 move = 2;
3261 else
3262 move = -1;
3263 }
3264
3265 if (move < 0)
3266 emit_move_insn (operands[0], operands[2]);
3267
3268 /* ??? If the destination overlaps both source tf and high_bit, then
3269 assume source tf is dead in its entirety and use the other half
3270 for a scratch register. Otherwise "scratch" is just the proper
3271 destination register. */
3272 scratch = operands[move < 2 ? 1 : 3];
3273
3274 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3275
3276 if (move > 0)
3277 {
3278 emit_move_insn (operands[0], operands[2]);
3279 if (move > 1)
3280 emit_move_insn (operands[1], scratch);
3281 }
3282 }
3283 \f
3284 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3285 unaligned data:
3286
3287 unsigned: signed:
3288 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3289 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3290 lda r3,X(r11) lda r3,X+2(r11)
3291 extwl r1,r3,r1 extql r1,r3,r1
3292 extwh r2,r3,r2 extqh r2,r3,r2
3293 or r1,r2,r1 or r1,r2,r1
3294 sra r1,48,r1
3295
3296 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3297 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3298 lda r3,X(r11) lda r3,X(r11)
3299 extll r1,r3,r1 extll r1,r3,r1
3300 extlh r2,r3,r2 extlh r2,r3,r2
3301 or r1,r2,r1 addl r1,r2,r1
3302
3303 quad: ldq_u r1,X(r11)
3304 ldq_u r2,X+7(r11)
3305 lda r3,X(r11)
3306 extql r1,r3,r1
3307 extqh r2,r3,r2
3308 or r1,r2,r1
3309 */
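/* Descriptive note (added): when BWX byte/word memory operations are
   available and SIZE is 2, the code below skips the extract sequence
   entirely and assembles the value from two byte loads instead.  */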
3310
3311 void
3312 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3313 HOST_WIDE_INT ofs, int sign)
3314 {
3315 rtx meml, memh, addr, extl, exth, tmp, mema;
3316 enum machine_mode mode;
3317
3318 if (TARGET_BWX && size == 2)
3319 {
3320 meml = adjust_address (mem, QImode, ofs);
3321 memh = adjust_address (mem, QImode, ofs+1);
3322 if (BYTES_BIG_ENDIAN)
3323 tmp = meml, meml = memh, memh = tmp;
3324 extl = gen_reg_rtx (DImode);
3325 exth = gen_reg_rtx (DImode);
3326 emit_insn (gen_zero_extendqidi2 (extl, meml));
3327 emit_insn (gen_zero_extendqidi2 (exth, memh));
3328 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3329 NULL, 1, OPTAB_LIB_WIDEN);
3330 addr = expand_simple_binop (DImode, IOR, extl, exth,
3331 NULL, 1, OPTAB_LIB_WIDEN);
3332
3333 if (sign && GET_MODE (tgt) != HImode)
3334 {
3335 addr = gen_lowpart (HImode, addr);
3336 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3337 }
3338 else
3339 {
3340 if (GET_MODE (tgt) != DImode)
3341 addr = gen_lowpart (GET_MODE (tgt), addr);
3342 emit_move_insn (tgt, addr);
3343 }
3344 return;
3345 }
3346
3347 meml = gen_reg_rtx (DImode);
3348 memh = gen_reg_rtx (DImode);
3349 addr = gen_reg_rtx (DImode);
3350 extl = gen_reg_rtx (DImode);
3351 exth = gen_reg_rtx (DImode);
3352
3353 mema = XEXP (mem, 0);
3354 if (GET_CODE (mema) == LO_SUM)
3355 mema = force_reg (Pmode, mema);
3356
3357 /* AND addresses cannot be in any alias set, since they may implicitly
3358 alias surrounding code. Ideally we'd have some alias set that
3359 covered all types except those with alignment 8 or higher. */
3360
3361 tmp = change_address (mem, DImode,
3362 gen_rtx_AND (DImode,
3363 plus_constant (mema, ofs),
3364 GEN_INT (-8)));
3365 set_mem_alias_set (tmp, 0);
3366 emit_move_insn (meml, tmp);
3367
3368 tmp = change_address (mem, DImode,
3369 gen_rtx_AND (DImode,
3370 plus_constant (mema, ofs + size - 1),
3371 GEN_INT (-8)));
3372 set_mem_alias_set (tmp, 0);
3373 emit_move_insn (memh, tmp);
3374
3375 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3376 {
3377 emit_move_insn (addr, plus_constant (mema, -1));
3378
3379 emit_insn (gen_extqh_be (extl, meml, addr));
3380 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3381
3382 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3383 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3384 addr, 1, OPTAB_WIDEN);
3385 }
3386 else if (sign && size == 2)
3387 {
3388 emit_move_insn (addr, plus_constant (mema, ofs+2));
3389
3390 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3391 emit_insn (gen_extqh_le (exth, memh, addr));
3392
3393 /* We must use tgt here for the target. Alpha-vms port fails if we use
3394 addr for the target, because addr is marked as a pointer and combine
3395 knows that pointers are always sign-extended 32 bit values. */
3396 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3397 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3398 addr, 1, OPTAB_WIDEN);
3399 }
3400 else
3401 {
3402 if (WORDS_BIG_ENDIAN)
3403 {
3404 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3405 switch ((int) size)
3406 {
3407 case 2:
3408 emit_insn (gen_extwh_be (extl, meml, addr));
3409 mode = HImode;
3410 break;
3411
3412 case 4:
3413 emit_insn (gen_extlh_be (extl, meml, addr));
3414 mode = SImode;
3415 break;
3416
3417 case 8:
3418 emit_insn (gen_extqh_be (extl, meml, addr));
3419 mode = DImode;
3420 break;
3421
3422 default:
3423 gcc_unreachable ();
3424 }
3425 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3426 }
3427 else
3428 {
3429 emit_move_insn (addr, plus_constant (mema, ofs));
3430 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3431 switch ((int) size)
3432 {
3433 case 2:
3434 emit_insn (gen_extwh_le (exth, memh, addr));
3435 mode = HImode;
3436 break;
3437
3438 case 4:
3439 emit_insn (gen_extlh_le (exth, memh, addr));
3440 mode = SImode;
3441 break;
3442
3443 case 8:
3444 emit_insn (gen_extqh_le (exth, memh, addr));
3445 mode = DImode;
3446 break;
3447
3448 default:
3449 gcc_unreachable ();
3450 }
3451 }
3452
3453 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3454 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3455 sign, OPTAB_WIDEN);
3456 }
3457
3458 if (addr != tgt)
3459 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3460 }
3461
3462 /* Similarly, use ins and msk instructions to perform unaligned stores. */
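/* Descriptive sketch (added): load the two quadwords that enclose the
   destination, shift the source into position with INSxL/INSxH, clear
   the affected byte lanes of the old data with MSKxL/MSKxH, OR the
   pieces together, and store both quadwords back -- high before low on
   little-endian so the degenerate aligned case is not corrupted.  */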
3463
3464 void
3465 alpha_expand_unaligned_store (rtx dst, rtx src,
3466 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3467 {
3468 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3469
3470 if (TARGET_BWX && size == 2)
3471 {
3472 if (src != const0_rtx)
3473 {
3474 dstl = gen_lowpart (QImode, src);
3475 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3476 NULL, 1, OPTAB_LIB_WIDEN);
3477 dsth = gen_lowpart (QImode, dsth);
3478 }
3479 else
3480 dstl = dsth = const0_rtx;
3481
3482 meml = adjust_address (dst, QImode, ofs);
3483 memh = adjust_address (dst, QImode, ofs+1);
3484 if (BYTES_BIG_ENDIAN)
3485 addr = meml, meml = memh, memh = addr;
3486
3487 emit_move_insn (meml, dstl);
3488 emit_move_insn (memh, dsth);
3489 return;
3490 }
3491
3492 dstl = gen_reg_rtx (DImode);
3493 dsth = gen_reg_rtx (DImode);
3494 insl = gen_reg_rtx (DImode);
3495 insh = gen_reg_rtx (DImode);
3496
3497 dsta = XEXP (dst, 0);
3498 if (GET_CODE (dsta) == LO_SUM)
3499 dsta = force_reg (Pmode, dsta);
3500
3501 /* AND addresses cannot be in any alias set, since they may implicitly
3502 alias surrounding code. Ideally we'd have some alias set that
3503 covered all types except those with alignment 8 or higher. */
3504
3505 meml = change_address (dst, DImode,
3506 gen_rtx_AND (DImode,
3507 plus_constant (dsta, ofs),
3508 GEN_INT (-8)));
3509 set_mem_alias_set (meml, 0);
3510
3511 memh = change_address (dst, DImode,
3512 gen_rtx_AND (DImode,
3513 plus_constant (dsta, ofs + size - 1),
3514 GEN_INT (-8)));
3515 set_mem_alias_set (memh, 0);
3516
3517 emit_move_insn (dsth, memh);
3518 emit_move_insn (dstl, meml);
3519 if (WORDS_BIG_ENDIAN)
3520 {
3521 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3522
3523 if (src != const0_rtx)
3524 {
3525 switch ((int) size)
3526 {
3527 case 2:
3528 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3529 break;
3530 case 4:
3531 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3532 break;
3533 case 8:
3534 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3535 break;
3536 }
3537 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3538 GEN_INT (size*8), addr));
3539 }
3540
3541 switch ((int) size)
3542 {
3543 case 2:
3544 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3545 break;
3546 case 4:
3547 {
3548 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3549 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3550 break;
3551 }
3552 case 8:
3553 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3554 break;
3555 }
3556
3557 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3558 }
3559 else
3560 {
3561 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3562
3563 if (src != CONST0_RTX (GET_MODE (src)))
3564 {
3565 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3566 GEN_INT (size*8), addr));
3567
3568 switch ((int) size)
3569 {
3570 case 2:
3571 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3572 break;
3573 case 4:
3574 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3575 break;
3576 case 8:
3577 emit_insn (gen_insql_le (insl, src, addr));
3578 break;
3579 }
3580 }
3581
3582 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3583
3584 switch ((int) size)
3585 {
3586 case 2:
3587 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3588 break;
3589 case 4:
3590 {
3591 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3592 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3593 break;
3594 }
3595 case 8:
3596 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3597 break;
3598 }
3599 }
3600
3601 if (src != CONST0_RTX (GET_MODE (src)))
3602 {
3603 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3604 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3605 }
3606
3607 if (WORDS_BIG_ENDIAN)
3608 {
3609 emit_move_insn (meml, dstl);
3610 emit_move_insn (memh, dsth);
3611 }
3612 else
3613 {
3614 /* Must store high before low for degenerate case of aligned. */
3615 emit_move_insn (memh, dsth);
3616 emit_move_insn (meml, dstl);
3617 }
3618 }
3619
3620 /* The block move code tries to maximize speed by separating loads and
3621 stores at the expense of register pressure: we load all of the data
3622 before we store it back out. There are two secondary effects worth
3623 mentioning: this speeds copying to/from aligned and unaligned
3624 buffers, and it makes the code significantly easier to write. */
3625
3626 #define MAX_MOVE_WORDS 8
3627
3628 /* Load an integral number of consecutive unaligned quadwords. */
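/* Descriptive note (added): the loop below fetches WORDS+1 overlapping
   aligned quadwords and combines adjacent pairs with extql/extqh (or
   their big-endian counterparts) to recover WORDS unaligned values.  */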
3629
3630 static void
3631 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3632 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3633 {
3634 rtx const im8 = GEN_INT (-8);
3635 rtx const i64 = GEN_INT (64);
3636 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3637 rtx sreg, areg, tmp, smema;
3638 HOST_WIDE_INT i;
3639
3640 smema = XEXP (smem, 0);
3641 if (GET_CODE (smema) == LO_SUM)
3642 smema = force_reg (Pmode, smema);
3643
3644 /* Generate all the tmp registers we need. */
3645 for (i = 0; i < words; ++i)
3646 {
3647 data_regs[i] = out_regs[i];
3648 ext_tmps[i] = gen_reg_rtx (DImode);
3649 }
3650 data_regs[words] = gen_reg_rtx (DImode);
3651
3652 if (ofs != 0)
3653 smem = adjust_address (smem, GET_MODE (smem), ofs);
3654
3655 /* Load up all of the source data. */
3656 for (i = 0; i < words; ++i)
3657 {
3658 tmp = change_address (smem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (smema, 8*i),
3661 im8));
3662 set_mem_alias_set (tmp, 0);
3663 emit_move_insn (data_regs[i], tmp);
3664 }
3665
3666 tmp = change_address (smem, DImode,
3667 gen_rtx_AND (DImode,
3668 plus_constant (smema, 8*words - 1),
3669 im8));
3670 set_mem_alias_set (tmp, 0);
3671 emit_move_insn (data_regs[words], tmp);
3672
3673 /* Extract the half-word fragments. Unfortunately DEC decided to make
3674 extxh with offset zero a noop instead of zeroing the register, so
3675 we must take care of that edge condition ourselves with cmov. */
3676
3677 sreg = copy_addr_to_reg (smema);
3678 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3679 1, OPTAB_WIDEN);
3680 if (WORDS_BIG_ENDIAN)
3681 emit_move_insn (sreg, plus_constant (sreg, 7));
3682 for (i = 0; i < words; ++i)
3683 {
3684 if (WORDS_BIG_ENDIAN)
3685 {
3686 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3687 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3688 }
3689 else
3690 {
3691 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3692 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3693 }
3694 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3695 gen_rtx_IF_THEN_ELSE (DImode,
3696 gen_rtx_EQ (DImode, areg,
3697 const0_rtx),
3698 const0_rtx, ext_tmps[i])));
3699 }
3700
3701 /* Merge the half-words into whole words. */
3702 for (i = 0; i < words; ++i)
3703 {
3704 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3705 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3706 }
3707 }
3708
3709 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3710 may be NULL to store zeros. */
3711
3712 static void
3713 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3714 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3715 {
3716 rtx const im8 = GEN_INT (-8);
3717 rtx const i64 = GEN_INT (64);
3718 rtx ins_tmps[MAX_MOVE_WORDS];
3719 rtx st_tmp_1, st_tmp_2, dreg;
3720 rtx st_addr_1, st_addr_2, dmema;
3721 HOST_WIDE_INT i;
3722
3723 dmema = XEXP (dmem, 0);
3724 if (GET_CODE (dmema) == LO_SUM)
3725 dmema = force_reg (Pmode, dmema);
3726
3727 /* Generate all the tmp registers we need. */
3728 if (data_regs != NULL)
3729 for (i = 0; i < words; ++i)
3730 ins_tmps[i] = gen_reg_rtx(DImode);
3731 st_tmp_1 = gen_reg_rtx(DImode);
3732 st_tmp_2 = gen_reg_rtx(DImode);
3733
3734 if (ofs != 0)
3735 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3736
3737 st_addr_2 = change_address (dmem, DImode,
3738 gen_rtx_AND (DImode,
3739 plus_constant (dmema, words*8 - 1),
3740 im8));
3741 set_mem_alias_set (st_addr_2, 0);
3742
3743 st_addr_1 = change_address (dmem, DImode,
3744 gen_rtx_AND (DImode, dmema, im8));
3745 set_mem_alias_set (st_addr_1, 0);
3746
3747 /* Load up the destination end bits. */
3748 emit_move_insn (st_tmp_2, st_addr_2);
3749 emit_move_insn (st_tmp_1, st_addr_1);
3750
3751 /* Shift the input data into place. */
3752 dreg = copy_addr_to_reg (dmema);
3753 if (WORDS_BIG_ENDIAN)
3754 emit_move_insn (dreg, plus_constant (dreg, 7));
3755 if (data_regs != NULL)
3756 {
3757 for (i = words-1; i >= 0; --i)
3758 {
3759 if (WORDS_BIG_ENDIAN)
3760 {
3761 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3762 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3763 }
3764 else
3765 {
3766 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3767 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3768 }
3769 }
3770 for (i = words-1; i > 0; --i)
3771 {
3772 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3773 ins_tmps[i-1], ins_tmps[i-1], 1,
3774 OPTAB_WIDEN);
3775 }
3776 }
3777
3778 /* Split and merge the ends with the destination data. */
3779 if (WORDS_BIG_ENDIAN)
3780 {
3781 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3782 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3783 }
3784 else
3785 {
3786 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3787 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3788 }
3789
3790 if (data_regs != NULL)
3791 {
3792 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3793 st_tmp_2, 1, OPTAB_WIDEN);
3794 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3795 st_tmp_1, 1, OPTAB_WIDEN);
3796 }
3797
3798 /* Store it all. */
3799 if (WORDS_BIG_ENDIAN)
3800 emit_move_insn (st_addr_1, st_tmp_1);
3801 else
3802 emit_move_insn (st_addr_2, st_tmp_2);
3803 for (i = words-1; i > 0; --i)
3804 {
3805 rtx tmp = change_address (dmem, DImode,
3806 gen_rtx_AND (DImode,
3807 plus_constant(dmema,
3808 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3809 im8));
3810 set_mem_alias_set (tmp, 0);
3811 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3812 }
3813 if (WORDS_BIG_ENDIAN)
3814 emit_move_insn (st_addr_2, st_tmp_2);
3815 else
3816 emit_move_insn (st_addr_1, st_tmp_1);
3817 }
3818
3819
3820 /* Expand string/block move operations.
3821
3822 operands[0] is the pointer to the destination.
3823 operands[1] is the pointer to the source.
3824 operands[2] is the number of bytes to move.
3825 operands[3] is the alignment. */
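/* Roughly (summary added for clarity), the expansion below runs in two
   phases: first read the whole block into registers using the widest
   accesses the source alignment allows (DImode, then SImode, HImode and
   finally bytes), then write those registers back out using the widest
   accesses the destination alignment allows.  */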
3826
3827 int
3828 alpha_expand_block_move (rtx operands[])
3829 {
3830 rtx bytes_rtx = operands[2];
3831 rtx align_rtx = operands[3];
3832 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3833 HOST_WIDE_INT bytes = orig_bytes;
3834 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3835 HOST_WIDE_INT dst_align = src_align;
3836 rtx orig_src = operands[1];
3837 rtx orig_dst = operands[0];
3838 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3839 rtx tmp;
3840 unsigned int i, words, ofs, nregs = 0;
3841
3842 if (orig_bytes <= 0)
3843 return 1;
3844 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3845 return 0;
3846
3847 /* Look for additional alignment information from recorded register info. */
3848
3849 tmp = XEXP (orig_src, 0);
3850 if (GET_CODE (tmp) == REG)
3851 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3852 else if (GET_CODE (tmp) == PLUS
3853 && GET_CODE (XEXP (tmp, 0)) == REG
3854 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3855 {
3856 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3857 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3858
3859 if (a > src_align)
3860 {
3861 if (a >= 64 && c % 8 == 0)
3862 src_align = 64;
3863 else if (a >= 32 && c % 4 == 0)
3864 src_align = 32;
3865 else if (a >= 16 && c % 2 == 0)
3866 src_align = 16;
3867 }
3868 }
3869
3870 tmp = XEXP (orig_dst, 0);
3871 if (GET_CODE (tmp) == REG)
3872 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3873 else if (GET_CODE (tmp) == PLUS
3874 && GET_CODE (XEXP (tmp, 0)) == REG
3875 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3876 {
3877 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3878 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3879
3880 if (a > dst_align)
3881 {
3882 if (a >= 64 && c % 8 == 0)
3883 dst_align = 64;
3884 else if (a >= 32 && c % 4 == 0)
3885 dst_align = 32;
3886 else if (a >= 16 && c % 2 == 0)
3887 dst_align = 16;
3888 }
3889 }
3890
3891 ofs = 0;
3892 if (src_align >= 64 && bytes >= 8)
3893 {
3894 words = bytes / 8;
3895
3896 for (i = 0; i < words; ++i)
3897 data_regs[nregs + i] = gen_reg_rtx (DImode);
3898
3899 for (i = 0; i < words; ++i)
3900 emit_move_insn (data_regs[nregs + i],
3901 adjust_address (orig_src, DImode, ofs + i * 8));
3902
3903 nregs += words;
3904 bytes -= words * 8;
3905 ofs += words * 8;
3906 }
3907
3908 if (src_align >= 32 && bytes >= 4)
3909 {
3910 words = bytes / 4;
3911
3912 for (i = 0; i < words; ++i)
3913 data_regs[nregs + i] = gen_reg_rtx (SImode);
3914
3915 for (i = 0; i < words; ++i)
3916 emit_move_insn (data_regs[nregs + i],
3917 adjust_address (orig_src, SImode, ofs + i * 4));
3918
3919 nregs += words;
3920 bytes -= words * 4;
3921 ofs += words * 4;
3922 }
3923
3924 if (bytes >= 8)
3925 {
3926 words = bytes / 8;
3927
3928 for (i = 0; i < words+1; ++i)
3929 data_regs[nregs + i] = gen_reg_rtx (DImode);
3930
3931 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3932 words, ofs);
3933
3934 nregs += words;
3935 bytes -= words * 8;
3936 ofs += words * 8;
3937 }
3938
3939 if (! TARGET_BWX && bytes >= 4)
3940 {
3941 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3942 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3943 bytes -= 4;
3944 ofs += 4;
3945 }
3946
3947 if (bytes >= 2)
3948 {
3949 if (src_align >= 16)
3950 {
3951 do {
3952 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3953 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3954 bytes -= 2;
3955 ofs += 2;
3956 } while (bytes >= 2);
3957 }
3958 else if (! TARGET_BWX)
3959 {
3960 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3961 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3962 bytes -= 2;
3963 ofs += 2;
3964 }
3965 }
3966
3967 while (bytes > 0)
3968 {
3969 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3970 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3971 bytes -= 1;
3972 ofs += 1;
3973 }
3974
3975 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3976
3977 /* Now save it back out again. */
3978
3979 i = 0, ofs = 0;
3980
3981 /* Write out the data in whatever chunks reading the source allowed. */
3982 if (dst_align >= 64)
3983 {
3984 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3985 {
3986 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3987 data_regs[i]);
3988 ofs += 8;
3989 i++;
3990 }
3991 }
3992
3993 if (dst_align >= 32)
3994 {
3995 /* If the source has remaining DImode regs, write them out in
3996 two pieces. */
3997 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3998 {
3999 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4000 NULL_RTX, 1, OPTAB_WIDEN);
4001
4002 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4003 gen_lowpart (SImode, data_regs[i]));
4004 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4005 gen_lowpart (SImode, tmp));
4006 ofs += 8;
4007 i++;
4008 }
4009
4010 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4011 {
4012 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4013 data_regs[i]);
4014 ofs += 4;
4015 i++;
4016 }
4017 }
4018
4019 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4020 {
4021 /* Write out a remaining block of words using unaligned methods. */
4022
4023 for (words = 1; i + words < nregs; words++)
4024 if (GET_MODE (data_regs[i + words]) != DImode)
4025 break;
4026
4027 if (words == 1)
4028 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4029 else
4030 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4031 words, ofs);
4032
4033 i += words;
4034 ofs += words * 8;
4035 }
4036
4037 /* Due to the above, this won't be aligned. */
4038 /* ??? If we have more than one of these, consider constructing full
4039 words in registers and using alpha_expand_unaligned_store_words. */
4040 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4041 {
4042 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4043 ofs += 4;
4044 i++;
4045 }
4046
4047 if (dst_align >= 16)
4048 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4049 {
4050 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4051 i++;
4052 ofs += 2;
4053 }
4054 else
4055 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4056 {
4057 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4058 i++;
4059 ofs += 2;
4060 }
4061
4062 /* The remainder must be byte copies. */
4063 while (i < nregs)
4064 {
4065 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4066 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 1;
4069 }
4070
4071 return 1;
4072 }
4073
4074 int
4075 alpha_expand_block_clear (rtx operands[])
4076 {
4077 rtx bytes_rtx = operands[1];
4078 rtx align_rtx = operands[3];
4079 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4080 HOST_WIDE_INT bytes = orig_bytes;
4081 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4082 HOST_WIDE_INT alignofs = 0;
4083 rtx orig_dst = operands[0];
4084 rtx tmp;
4085 int i, words, ofs = 0;
4086
4087 if (orig_bytes <= 0)
4088 return 1;
4089 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4090 return 0;
4091
4092 /* Look for stricter alignment. */
4093 tmp = XEXP (orig_dst, 0);
4094 if (GET_CODE (tmp) == REG)
4095 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4096 else if (GET_CODE (tmp) == PLUS
4097 && GET_CODE (XEXP (tmp, 0)) == REG
4098 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4099 {
4100 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4101 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4102
4103 if (a > align)
4104 {
4105 if (a >= 64)
4106 align = a, alignofs = 8 - c % 8;
4107 else if (a >= 32)
4108 align = a, alignofs = 4 - c % 4;
4109 else if (a >= 16)
4110 align = a, alignofs = 2 - c % 2;
4111 }
4112 }
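/* ALIGNOFS now counts the bytes from the start of the block up to the next
   boundary of the newly discovered alignment.  */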
4113
4114 /* Handle an unaligned prefix first. */
4115
4116 if (alignofs > 0)
4117 {
4118 #if HOST_BITS_PER_WIDE_INT >= 64
4119 /* Given that alignofs is bounded by align, the only time BWX could
4120 generate three stores is for a 7 byte fill. Prefer two individual
4121 stores over a load/mask/store sequence. */
4122 if ((!TARGET_BWX || alignofs == 7)
4123 && align >= 32
4124 && !(alignofs == 4 && bytes >= 4))
4125 {
4126 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4127 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4128 rtx mem, tmp;
4129 HOST_WIDE_INT mask;
4130
4131 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4132 set_mem_alias_set (mem, 0);
4133
4134 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4135 if (bytes < alignofs)
4136 {
4137 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4138 ofs += bytes;
4139 bytes = 0;
4140 }
4141 else
4142 {
4143 bytes -= alignofs;
4144 ofs += alignofs;
4145 }
4146 alignofs = 0;
4147
4148 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4149 NULL_RTX, 1, OPTAB_WIDEN);
4150
4151 emit_move_insn (mem, tmp);
4152 }
4153 #endif
4154
4155 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4156 {
4157 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4158 bytes -= 1;
4159 ofs += 1;
4160 alignofs -= 1;
4161 }
4162 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4163 {
4164 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4165 bytes -= 2;
4166 ofs += 2;
4167 alignofs -= 2;
4168 }
4169 if (alignofs == 4 && bytes >= 4)
4170 {
4171 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4172 bytes -= 4;
4173 ofs += 4;
4174 alignofs = 0;
4175 }
4176
4177 /* If we've not used the extra lead alignment information by now,
4178 we won't be able to. Downgrade align to match what's left over. */
4179 if (alignofs > 0)
4180 {
4181 alignofs = alignofs & -alignofs;
4182 align = MIN (align, alignofs * BITS_PER_UNIT);
4183 }
4184 }
4185
4186 /* Handle a block of contiguous long-words. */
4187
4188 if (align >= 64 && bytes >= 8)
4189 {
4190 words = bytes / 8;
4191
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4194 const0_rtx);
4195
4196 bytes -= words * 8;
4197 ofs += words * 8;
4198 }
4199
4200 /* If the block is large and appropriately aligned, emit a single
4201 store followed by a sequence of stq_u insns. */
4202
4203 if (align >= 32 && bytes > 16)
4204 {
4205 rtx orig_dsta;
4206
4207 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4208 bytes -= 4;
4209 ofs += 4;
4210
4211 orig_dsta = XEXP (orig_dst, 0);
4212 if (GET_CODE (orig_dsta) == LO_SUM)
4213 orig_dsta = force_reg (Pmode, orig_dsta);
4214
4215 words = bytes / 8;
4216 for (i = 0; i < words; ++i)
4217 {
4218 rtx mem
4219 = change_address (orig_dst, DImode,
4220 gen_rtx_AND (DImode,
4221 plus_constant (orig_dsta, ofs + i*8),
4222 GEN_INT (-8)));
4223 set_mem_alias_set (mem, 0);
4224 emit_move_insn (mem, const0_rtx);
4225 }
4226
4227 /* Depending on the alignment, the first stq_u may have overlapped
4228 with the initial stl, which means that the last stq_u didn't
4229 write as much as it would appear. Leave those questionable bytes
4230 unaccounted for. */
4231 bytes -= words * 8 - 4;
4232 ofs += words * 8 - 4;
4233 }
4234
4235 /* Handle a smaller block of aligned words. */
4236
4237 if ((align >= 64 && bytes == 4)
4238 || (align == 32 && bytes >= 4))
4239 {
4240 words = bytes / 4;
4241
4242 for (i = 0; i < words; ++i)
4243 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4244 const0_rtx);
4245
4246 bytes -= words * 4;
4247 ofs += words * 4;
4248 }
4249
 4250 /* An unaligned block uses stq_u stores for as many quadwords as possible. */
4251
4252 if (bytes >= 8)
4253 {
4254 words = bytes / 8;
4255
4256 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4257
4258 bytes -= words * 8;
4259 ofs += words * 8;
4260 }
4261
4262 /* Next clean up any trailing pieces. */
4263
4264 #if HOST_BITS_PER_WIDE_INT >= 64
4265 /* Count the number of bits in BYTES for which aligned stores could
4266 be emitted. */
4267 words = 0;
4268 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4269 if (bytes & i)
4270 words += 1;
4271
4272 /* If we have appropriate alignment (and it wouldn't take too many
4273 instructions otherwise), mask out the bytes we need. */
4274 if (TARGET_BWX ? words > 2 : bytes > 0)
4275 {
4276 if (align >= 64)
4277 {
4278 rtx mem, tmp;
4279 HOST_WIDE_INT mask;
4280
4281 mem = adjust_address (orig_dst, DImode, ofs);
4282 set_mem_alias_set (mem, 0);
4283
4284 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4285
4286 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4287 NULL_RTX, 1, OPTAB_WIDEN);
4288
4289 emit_move_insn (mem, tmp);
4290 return 1;
4291 }
4292 else if (align >= 32 && bytes < 4)
4293 {
4294 rtx mem, tmp;
4295 HOST_WIDE_INT mask;
4296
4297 mem = adjust_address (orig_dst, SImode, ofs);
4298 set_mem_alias_set (mem, 0);
4299
4300 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4301
4302 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4303 NULL_RTX, 1, OPTAB_WIDEN);
4304
4305 emit_move_insn (mem, tmp);
4306 return 1;
4307 }
4308 }
4309 #endif
4310
4311 if (!TARGET_BWX && bytes >= 4)
4312 {
4313 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4314 bytes -= 4;
4315 ofs += 4;
4316 }
4317
4318 if (bytes >= 2)
4319 {
4320 if (align >= 16)
4321 {
4322 do {
4323 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4324 const0_rtx);
4325 bytes -= 2;
4326 ofs += 2;
4327 } while (bytes >= 2);
4328 }
4329 else if (! TARGET_BWX)
4330 {
4331 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4332 bytes -= 2;
4333 ofs += 2;
4334 }
4335 }
4336
4337 while (bytes > 0)
4338 {
4339 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4340 bytes -= 1;
4341 ofs += 1;
4342 }
4343
4344 return 1;
4345 }
4346
4347 /* Returns a mask so that zap(x, value) == x & mask. */
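/* For example, VALUE == 0x0f selects bytes 0-3 for zapping, so the mask
   returned is 0xffffffff00000000: the low four bytes cleared, the high
   four preserved.  */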
4348
4349 rtx
4350 alpha_expand_zap_mask (HOST_WIDE_INT value)
4351 {
4352 rtx result;
4353 int i;
4354
4355 if (HOST_BITS_PER_WIDE_INT >= 64)
4356 {
4357 HOST_WIDE_INT mask = 0;
4358
4359 for (i = 7; i >= 0; --i)
4360 {
4361 mask <<= 8;
4362 if (!((value >> i) & 1))
4363 mask |= 0xff;
4364 }
4365
4366 result = gen_int_mode (mask, DImode);
4367 }
4368 else
4369 {
4370 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4371
4372 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4373
4374 for (i = 7; i >= 4; --i)
4375 {
4376 mask_hi <<= 8;
4377 if (!((value >> i) & 1))
4378 mask_hi |= 0xff;
4379 }
4380
4381 for (i = 3; i >= 0; --i)
4382 {
4383 mask_lo <<= 8;
4384 if (!((value >> i) & 1))
4385 mask_lo |= 0xff;
4386 }
4387
4388 result = immed_double_const (mask_lo, mask_hi, DImode);
4389 }
4390
4391 return result;
4392 }
4393
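/* Expand a vector builtin by calling the insn generator GEN on operands
   converted to vector MODE; integer zero operands are rewritten as the
   corresponding vector zero constant.  */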
4394 void
4395 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4396 enum machine_mode mode,
4397 rtx op0, rtx op1, rtx op2)
4398 {
4399 op0 = gen_lowpart (mode, op0);
4400
4401 if (op1 == const0_rtx)
4402 op1 = CONST0_RTX (mode);
4403 else
4404 op1 = gen_lowpart (mode, op1);
4405
4406 if (op2 == const0_rtx)
4407 op2 = CONST0_RTX (mode);
4408 else
4409 op2 = gen_lowpart (mode, op2);
4410
4411 emit_insn ((*gen) (op0, op1, op2));
4412 }
4413
4414 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4415 COND is true. Mark the jump as unlikely to be taken. */
4416
4417 static void
4418 emit_unlikely_jump (rtx cond, rtx label)
4419 {
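  /* A probability of REG_BR_PROB_BASE / 100 - 1 marks the branch as taken
     just under 1% of the time.  */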
4420 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4421 rtx x;
4422
4423 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4424 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4425 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4426 }
4427
4428 /* A subroutine of the atomic operation splitters. Emit a load-locked
4429 instruction in MODE. */
4430
4431 static void
4432 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4433 {
4434 rtx (*fn) (rtx, rtx) = NULL;
4435 if (mode == SImode)
4436 fn = gen_load_locked_si;
4437 else if (mode == DImode)
4438 fn = gen_load_locked_di;
4439 emit_insn (fn (reg, mem));
4440 }
4441
4442 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4443 instruction in MODE. */
4444
4445 static void
4446 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4447 {
4448 rtx (*fn) (rtx, rtx, rtx) = NULL;
4449 if (mode == SImode)
4450 fn = gen_store_conditional_si;
4451 else if (mode == DImode)
4452 fn = gen_store_conditional_di;
4453 emit_insn (fn (res, mem, val));
4454 }
4455
4456 /* A subroutine of the atomic operation splitters. Emit an insxl
4457 instruction in MODE. */
4458
4459 static rtx
4460 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4461 {
4462 rtx ret = gen_reg_rtx (DImode);
4463 rtx (*fn) (rtx, rtx, rtx);
4464
4465 if (WORDS_BIG_ENDIAN)
4466 {
4467 if (mode == QImode)
4468 fn = gen_insbl_be;
4469 else
4470 fn = gen_inswl_be;
4471 }
4472 else
4473 {
4474 if (mode == QImode)
4475 fn = gen_insbl_le;
4476 else
4477 fn = gen_inswl_le;
4478 }
4479 emit_insn (fn (ret, op1, op2));
4480
4481 return ret;
4482 }
4483
 4484 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
 4485 to perform. MEM is the memory on which to operate. VAL is the second
 4486 operand of the binary operator. BEFORE and AFTER are optional locations to
 4487 return the value of MEM either before or after the operation. SCRATCH is
4488 a scratch register. */
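/* The expansion is the usual load-locked/store-conditional retry loop,
   bracketed by memory barriers -- roughly:

	mb
   retry:
	ld<mode>_l  scratch,mem
	<op>        scratch
	st<mode>_c  scratch,mem
	beq         scratch,retry
	mb  */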
4489
4490 void
4491 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4492 rtx before, rtx after, rtx scratch)
4493 {
4494 enum machine_mode mode = GET_MODE (mem);
4495 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4496
4497 emit_insn (gen_memory_barrier ());
4498
4499 label = gen_label_rtx ();
4500 emit_label (label);
4501 label = gen_rtx_LABEL_REF (DImode, label);
4502
4503 if (before == NULL)
4504 before = scratch;
4505 emit_load_locked (mode, before, mem);
4506
4507 if (code == NOT)
4508 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4509 else
4510 x = gen_rtx_fmt_ee (code, mode, before, val);
4511 if (after)
4512 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4513 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4514
4515 emit_store_conditional (mode, cond, mem, scratch);
4516
4517 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4518 emit_unlikely_jump (x, label);
4519
4520 emit_insn (gen_memory_barrier ());
4521 }
4522
4523 /* Expand a compare and swap operation. */
4524
4525 void
4526 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4527 rtx scratch)
4528 {
4529 enum machine_mode mode = GET_MODE (mem);
4530 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4531
4532 emit_insn (gen_memory_barrier ());
4533
4534 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4535 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4536 emit_label (XEXP (label1, 0));
4537
4538 emit_load_locked (mode, retval, mem);
4539
4540 x = gen_lowpart (DImode, retval);
4541 if (oldval == const0_rtx)
4542 x = gen_rtx_NE (DImode, x, const0_rtx);
4543 else
4544 {
4545 x = gen_rtx_EQ (DImode, x, oldval);
4546 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4547 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4548 }
4549 emit_unlikely_jump (x, label2);
4550
4551 emit_move_insn (scratch, newval);
4552 emit_store_conditional (mode, cond, mem, scratch);
4553
4554 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4555 emit_unlikely_jump (x, label1);
4556
4557 emit_insn (gen_memory_barrier ());
4558 emit_label (XEXP (label2, 0));
4559 }
4560
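/* Expand a compare-and-swap on a QImode or HImode location by operating
   on the aligned DImode quadword that contains it.  */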
4561 void
4562 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4563 {
4564 enum machine_mode mode = GET_MODE (mem);
4565 rtx addr, align, wdst;
4566 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4567
4568 addr = force_reg (DImode, XEXP (mem, 0));
4569 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4570 NULL_RTX, 1, OPTAB_DIRECT);
4571
4572 oldval = convert_modes (DImode, mode, oldval, 1);
4573 newval = emit_insxl (mode, newval, addr);
4574
4575 wdst = gen_reg_rtx (DImode);
4576 if (mode == QImode)
4577 fn5 = gen_sync_compare_and_swapqi_1;
4578 else
4579 fn5 = gen_sync_compare_and_swaphi_1;
4580 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4581
4582 emit_move_insn (dst, gen_lowpart (mode, wdst));
4583 }
4584
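/* Split a sub-word compare-and-swap: the containing quadword is loaded
   locked, extxl extracts the old field for the comparison, and mskxl/ior
   splice the new value in before the store-conditional.  */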
4585 void
4586 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4587 rtx oldval, rtx newval, rtx align,
4588 rtx scratch, rtx cond)
4589 {
4590 rtx label1, label2, mem, width, mask, x;
4591
4592 mem = gen_rtx_MEM (DImode, align);
4593 MEM_VOLATILE_P (mem) = 1;
4594
4595 emit_insn (gen_memory_barrier ());
4596 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4598 emit_label (XEXP (label1, 0));
4599
4600 emit_load_locked (DImode, scratch, mem);
4601
4602 width = GEN_INT (GET_MODE_BITSIZE (mode));
4603 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4604 if (WORDS_BIG_ENDIAN)
4605 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4606 else
4607 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4608
4609 if (oldval == const0_rtx)
4610 x = gen_rtx_NE (DImode, dest, const0_rtx);
4611 else
4612 {
4613 x = gen_rtx_EQ (DImode, dest, oldval);
4614 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4615 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4616 }
4617 emit_unlikely_jump (x, label2);
4618
4619 if (WORDS_BIG_ENDIAN)
4620 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4621 else
4622 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4623 emit_insn (gen_iordi3 (scratch, scratch, newval));
4624
4625 emit_store_conditional (DImode, scratch, mem, scratch);
4626
4627 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4628 emit_unlikely_jump (x, label1);
4629
4630 emit_insn (gen_memory_barrier ());
4631 emit_label (XEXP (label2, 0));
4632 }
4633
4634 /* Expand an atomic exchange operation. */
4635
4636 void
4637 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4638 {
4639 enum machine_mode mode = GET_MODE (mem);
4640 rtx label, x, cond = gen_lowpart (DImode, scratch);
4641
4642 emit_insn (gen_memory_barrier ());
4643
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4646
4647 emit_load_locked (mode, retval, mem);
4648 emit_move_insn (scratch, val);
4649 emit_store_conditional (mode, cond, mem, scratch);
4650
4651 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4652 emit_unlikely_jump (x, label);
4653 }
4654
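/* Expand a lock-test-and-set (atomic exchange) on a QImode or HImode
   location by operating on the aligned DImode quadword that contains it.  */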
4655 void
4656 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4657 {
4658 enum machine_mode mode = GET_MODE (mem);
4659 rtx addr, align, wdst;
4660 rtx (*fn4) (rtx, rtx, rtx, rtx);
4661
4662 /* Force the address into a register. */
4663 addr = force_reg (DImode, XEXP (mem, 0));
4664
4665 /* Align it to a multiple of 8. */
4666 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4667 NULL_RTX, 1, OPTAB_DIRECT);
4668
4669 /* Insert val into the correct byte location within the word. */
4670 val = emit_insxl (mode, val, addr);
4671
4672 wdst = gen_reg_rtx (DImode);
4673 if (mode == QImode)
4674 fn4 = gen_sync_lock_test_and_setqi_1;
4675 else
4676 fn4 = gen_sync_lock_test_and_sethi_1;
4677 emit_insn (fn4 (wdst, addr, val, align));
4678
4679 emit_move_insn (dst, gen_lowpart (mode, wdst));
4680 }
4681
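/* Split a sub-word lock-test-and-set: as in the compare-and-swap splitter
   above, the containing quadword is updated with a load-locked/extxl/mskxl/
   ior/store-conditional sequence.  */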
4682 void
4683 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4684 rtx val, rtx align, rtx scratch)
4685 {
4686 rtx label, mem, width, mask, x;
4687
4688 mem = gen_rtx_MEM (DImode, align);
4689 MEM_VOLATILE_P (mem) = 1;
4690
4691 emit_insn (gen_memory_barrier ());
4692 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4693 emit_label (XEXP (label, 0));
4694
4695 emit_load_locked (DImode, scratch, mem);
4696
4697 width = GEN_INT (GET_MODE_BITSIZE (mode));
4698 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4699 if (WORDS_BIG_ENDIAN)
4700 {
4701 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4702 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4703 }
4704 else
4705 {
4706 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4707 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4708 }
4709 emit_insn (gen_iordi3 (scratch, scratch, val));
4710
4711 emit_store_conditional (DImode, scratch, mem, scratch);
4712
4713 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4714 emit_unlikely_jump (x, label);
4715 }
4716 \f
4717 /* Adjust the cost of a scheduling dependency. Return the new cost of
 4718 a dependency LINK of INSN on DEP_INSN. COST is the current cost. */
4719
4720 static int
4721 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4722 {
4723 enum attr_type insn_type, dep_insn_type;
4724
4725 /* If the dependence is an anti-dependence, there is no cost. For an
4726 output dependence, there is sometimes a cost, but it doesn't seem
4727 worth handling those few cases. */
4728 if (REG_NOTE_KIND (link) != 0)
4729 return cost;
4730
4731 /* If we can't recognize the insns, we can't really do anything. */
4732 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4733 return cost;
4734
4735 insn_type = get_attr_type (insn);
4736 dep_insn_type = get_attr_type (dep_insn);
4737
4738 /* Bring in the user-defined memory latency. */
4739 if (dep_insn_type == TYPE_ILD
4740 || dep_insn_type == TYPE_FLD
4741 || dep_insn_type == TYPE_LDSYM)
4742 cost += alpha_memory_latency-1;
4743
4744 /* Everything else handled in DFA bypasses now. */
4745
4746 return cost;
4747 }
4748
4749 /* The number of instructions that can be issued per cycle. */
4750
4751 static int
4752 alpha_issue_rate (void)
4753 {
4754 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4755 }
4756
4757 /* How many alternative schedules to try. This should be as wide as the
4758 scheduling freedom in the DFA, but no wider. Making this value too
 4759 large results in extra work for the scheduler.
4760
4761 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4762 alternative schedules. For EV5, we can choose between E0/E1 and
4763 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4764
4765 static int
4766 alpha_multipass_dfa_lookahead (void)
4767 {
4768 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4769 }
4770 \f
4771 /* Machine-specific function data. */
4772
4773 struct machine_function GTY(())
4774 {
4775 /* For unicosmk. */
4776 /* List of call information words for calls from this function. */
4777 struct rtx_def *first_ciw;
4778 struct rtx_def *last_ciw;
4779 int ciw_count;
4780
4781 /* List of deferred case vectors. */
4782 struct rtx_def *addr_list;
4783
4784 /* For OSF. */
4785 const char *some_ld_name;
4786
4787 /* For TARGET_LD_BUGGY_LDGP. */
4788 struct rtx_def *gp_save_rtx;
4789 };
4790
4791 /* How to allocate a 'struct machine_function'. */
4792
4793 static struct machine_function *
4794 alpha_init_machine_status (void)
4795 {
4796 return ((struct machine_function *)
4797 ggc_alloc_cleared (sizeof (struct machine_function)));
4798 }
4799
4800 /* Functions to save and restore alpha_return_addr_rtx. */
4801
4802 /* Start the ball rolling with RETURN_ADDR_RTX. */
4803
4804 rtx
4805 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4806 {
4807 if (count != 0)
4808 return const0_rtx;
4809
4810 return get_hard_reg_initial_val (Pmode, REG_RA);
4811 }
4812
4813 /* Return or create a memory slot containing the gp value for the current
4814 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4815
4816 rtx
4817 alpha_gp_save_rtx (void)
4818 {
4819 rtx seq, m = cfun->machine->gp_save_rtx;
4820
4821 if (m == NULL)
4822 {
4823 start_sequence ();
4824
4825 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4826 m = validize_mem (m);
4827 emit_move_insn (m, pic_offset_table_rtx);
4828
4829 seq = get_insns ();
4830 end_sequence ();
4831 emit_insn_after (seq, entry_of_function ());
4832
4833 cfun->machine->gp_save_rtx = m;
4834 }
4835
4836 return m;
4837 }
4838
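/* Return nonzero if the return address register ($26) is written anywhere
   in the current function, so that it will need to be saved.  */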
4839 static int
4840 alpha_ra_ever_killed (void)
4841 {
4842 rtx top;
4843
4844 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4845 return regs_ever_live[REG_RA];
4846
4847 push_topmost_sequence ();
4848 top = get_insns ();
4849 pop_topmost_sequence ();
4850
4851 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4852 }
4853
4854 \f
4855 /* Return the trap mode suffix applicable to the current
4856 instruction, or NULL. */
4857
4858 static const char *
4859 get_trap_mode_suffix (void)
4860 {
4861 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4862
4863 switch (s)
4864 {
4865 case TRAP_SUFFIX_NONE:
4866 return NULL;
4867
4868 case TRAP_SUFFIX_SU:
4869 if (alpha_fptm >= ALPHA_FPTM_SU)
4870 return "su";
4871 return NULL;
4872
4873 case TRAP_SUFFIX_SUI:
4874 if (alpha_fptm >= ALPHA_FPTM_SUI)
4875 return "sui";
4876 return NULL;
4877
4878 case TRAP_SUFFIX_V_SV:
4879 switch (alpha_fptm)
4880 {
4881 case ALPHA_FPTM_N:
4882 return NULL;
4883 case ALPHA_FPTM_U:
4884 return "v";
4885 case ALPHA_FPTM_SU:
4886 case ALPHA_FPTM_SUI:
4887 return "sv";
4888 default:
4889 gcc_unreachable ();
4890 }
4891
4892 case TRAP_SUFFIX_V_SV_SVI:
4893 switch (alpha_fptm)
4894 {
4895 case ALPHA_FPTM_N:
4896 return NULL;
4897 case ALPHA_FPTM_U:
4898 return "v";
4899 case ALPHA_FPTM_SU:
4900 return "sv";
4901 case ALPHA_FPTM_SUI:
4902 return "svi";
4903 default:
4904 gcc_unreachable ();
4905 }
4906 break;
4907
4908 case TRAP_SUFFIX_U_SU_SUI:
4909 switch (alpha_fptm)
4910 {
4911 case ALPHA_FPTM_N:
4912 return NULL;
4913 case ALPHA_FPTM_U:
4914 return "u";
4915 case ALPHA_FPTM_SU:
4916 return "su";
4917 case ALPHA_FPTM_SUI:
4918 return "sui";
4919 default:
4920 gcc_unreachable ();
4921 }
4922 break;
4923
4924 default:
4925 gcc_unreachable ();
4926 }
4927 gcc_unreachable ();
4928 }
4929
4930 /* Return the rounding mode suffix applicable to the current
4931 instruction, or NULL. */
4932
4933 static const char *
4934 get_round_mode_suffix (void)
4935 {
4936 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4937
4938 switch (s)
4939 {
4940 case ROUND_SUFFIX_NONE:
4941 return NULL;
4942 case ROUND_SUFFIX_NORMAL:
4943 switch (alpha_fprm)
4944 {
4945 case ALPHA_FPRM_NORM:
4946 return NULL;
4947 case ALPHA_FPRM_MINF:
4948 return "m";
4949 case ALPHA_FPRM_CHOP:
4950 return "c";
4951 case ALPHA_FPRM_DYN:
4952 return "d";
4953 default:
4954 gcc_unreachable ();
4955 }
4956 break;
4957
4958 case ROUND_SUFFIX_C:
4959 return "c";
4960
4961 default:
4962 gcc_unreachable ();
4963 }
4964 gcc_unreachable ();
4965 }
4966
4967 /* Locate some local-dynamic symbol still in use by this function
4968 so that we can print its name in some movdi_er_tlsldm pattern. */
4969
4970 static int
4971 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4972 {
4973 rtx x = *px;
4974
4975 if (GET_CODE (x) == SYMBOL_REF
4976 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4977 {
4978 cfun->machine->some_ld_name = XSTR (x, 0);
4979 return 1;
4980 }
4981
4982 return 0;
4983 }
4984
4985 static const char *
4986 get_some_local_dynamic_name (void)
4987 {
4988 rtx insn;
4989
4990 if (cfun->machine->some_ld_name)
4991 return cfun->machine->some_ld_name;
4992
4993 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4994 if (INSN_P (insn)
4995 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4996 return cfun->machine->some_ld_name;
4997
4998 gcc_unreachable ();
4999 }
5000
5001 /* Print an operand. Recognize special options, documented below. */
5002
5003 void
5004 print_operand (FILE *file, rtx x, int code)
5005 {
5006 int i;
5007
5008 switch (code)
5009 {
5010 case '~':
5011 /* Print the assembler name of the current function. */
5012 assemble_name (file, alpha_fnname);
5013 break;
5014
5015 case '&':
5016 assemble_name (file, get_some_local_dynamic_name ());
5017 break;
5018
5019 case '/':
5020 {
5021 const char *trap = get_trap_mode_suffix ();
5022 const char *round = get_round_mode_suffix ();
5023
5024 if (trap || round)
5025 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5026 (trap ? trap : ""), (round ? round : ""));
5027 break;
5028 }
5029
5030 case ',':
5031 /* Generates single precision instruction suffix. */
5032 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5033 break;
5034
5035 case '-':
5036 /* Generates double precision instruction suffix. */
5037 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5038 break;
5039
5040 case '+':
5041 /* Generates a nop after a noreturn call at the very end of the
5042 function. */
5043 if (next_real_insn (current_output_insn) == 0)
5044 fprintf (file, "\n\tnop");
5045 break;
5046
5047 case '#':
5048 if (alpha_this_literal_sequence_number == 0)
5049 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5050 fprintf (file, "%d", alpha_this_literal_sequence_number);
5051 break;
5052
5053 case '*':
5054 if (alpha_this_gpdisp_sequence_number == 0)
5055 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5056 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5057 break;
5058
5059 case 'H':
5060 if (GET_CODE (x) == HIGH)
5061 output_addr_const (file, XEXP (x, 0));
5062 else
5063 output_operand_lossage ("invalid %%H value");
5064 break;
5065
5066 case 'J':
5067 {
5068 const char *lituse;
5069
5070 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5071 {
5072 x = XVECEXP (x, 0, 0);
5073 lituse = "lituse_tlsgd";
5074 }
5075 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5076 {
5077 x = XVECEXP (x, 0, 0);
5078 lituse = "lituse_tlsldm";
5079 }
5080 else if (GET_CODE (x) == CONST_INT)
5081 lituse = "lituse_jsr";
5082 else
5083 {
5084 output_operand_lossage ("invalid %%J value");
5085 break;
5086 }
5087
5088 if (x != const0_rtx)
5089 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5090 }
5091 break;
5092
5093 case 'j':
5094 {
5095 const char *lituse;
5096
5097 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5098 lituse = "lituse_jsrdirect";
5099 #else
5100 lituse = "lituse_jsr";
5101 #endif
5102
5103 gcc_assert (INTVAL (x) != 0);
5104 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5105 }
5106 break;
5107 case 'r':
5108 /* If this operand is the constant zero, write it as "$31". */
5109 if (GET_CODE (x) == REG)
5110 fprintf (file, "%s", reg_names[REGNO (x)]);
5111 else if (x == CONST0_RTX (GET_MODE (x)))
5112 fprintf (file, "$31");
5113 else
5114 output_operand_lossage ("invalid %%r value");
5115 break;
5116
5117 case 'R':
5118 /* Similar, but for floating-point. */
5119 if (GET_CODE (x) == REG)
5120 fprintf (file, "%s", reg_names[REGNO (x)]);
5121 else if (x == CONST0_RTX (GET_MODE (x)))
5122 fprintf (file, "$f31");
5123 else
5124 output_operand_lossage ("invalid %%R value");
5125 break;
5126
5127 case 'N':
5128 /* Write the 1's complement of a constant. */
5129 if (GET_CODE (x) != CONST_INT)
5130 output_operand_lossage ("invalid %%N value");
5131
5132 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5133 break;
5134
5135 case 'P':
5136 /* Write 1 << C, for a constant C. */
5137 if (GET_CODE (x) != CONST_INT)
5138 output_operand_lossage ("invalid %%P value");
5139
5140 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5141 break;
5142
5143 case 'h':
5144 /* Write the high-order 16 bits of a constant, sign-extended. */
5145 if (GET_CODE (x) != CONST_INT)
5146 output_operand_lossage ("invalid %%h value");
5147
5148 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5149 break;
5150
5151 case 'L':
5152 /* Write the low-order 16 bits of a constant, sign-extended. */
5153 if (GET_CODE (x) != CONST_INT)
5154 output_operand_lossage ("invalid %%L value");
5155
5156 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5157 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5158 break;
5159
5160 case 'm':
5161 /* Write mask for ZAP insn. */
5162 if (GET_CODE (x) == CONST_DOUBLE)
5163 {
5164 HOST_WIDE_INT mask = 0;
5165 HOST_WIDE_INT value;
5166
5167 value = CONST_DOUBLE_LOW (x);
5168 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5169 i++, value >>= 8)
5170 if (value & 0xff)
5171 mask |= (1 << i);
5172
5173 value = CONST_DOUBLE_HIGH (x);
5174 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5175 i++, value >>= 8)
5176 if (value & 0xff)
5177 mask |= (1 << (i + sizeof (int)));
5178
5179 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5180 }
5181
5182 else if (GET_CODE (x) == CONST_INT)
5183 {
5184 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5185
5186 for (i = 0; i < 8; i++, value >>= 8)
5187 if (value & 0xff)
5188 mask |= (1 << i);
5189
5190 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5191 }
5192 else
5193 output_operand_lossage ("invalid %%m value");
5194 break;
5195
5196 case 'M':
5197 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5198 if (GET_CODE (x) != CONST_INT
5199 || (INTVAL (x) != 8 && INTVAL (x) != 16
5200 && INTVAL (x) != 32 && INTVAL (x) != 64))
5201 output_operand_lossage ("invalid %%M value");
5202
5203 fprintf (file, "%s",
5204 (INTVAL (x) == 8 ? "b"
5205 : INTVAL (x) == 16 ? "w"
5206 : INTVAL (x) == 32 ? "l"
5207 : "q"));
5208 break;
5209
5210 case 'U':
5211 /* Similar, except do it from the mask. */
5212 if (GET_CODE (x) == CONST_INT)
5213 {
5214 HOST_WIDE_INT value = INTVAL (x);
5215
5216 if (value == 0xff)
5217 {
5218 fputc ('b', file);
5219 break;
5220 }
5221 if (value == 0xffff)
5222 {
5223 fputc ('w', file);
5224 break;
5225 }
5226 if (value == 0xffffffff)
5227 {
5228 fputc ('l', file);
5229 break;
5230 }
5231 if (value == -1)
5232 {
5233 fputc ('q', file);
5234 break;
5235 }
5236 }
5237 else if (HOST_BITS_PER_WIDE_INT == 32
5238 && GET_CODE (x) == CONST_DOUBLE
5239 && CONST_DOUBLE_LOW (x) == 0xffffffff
5240 && CONST_DOUBLE_HIGH (x) == 0)
5241 {
5242 fputc ('l', file);
5243 break;
5244 }
5245 output_operand_lossage ("invalid %%U value");
5246 break;
5247
5248 case 's':
5249 /* Write the constant value divided by 8 for little-endian mode or
5250 (56 - value) / 8 for big-endian mode. */
5251
5252 if (GET_CODE (x) != CONST_INT
5253 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5254 ? 56
5255 : 64)
5256 || (INTVAL (x) & 7) != 0)
5257 output_operand_lossage ("invalid %%s value");
5258
5259 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5260 WORDS_BIG_ENDIAN
5261 ? (56 - INTVAL (x)) / 8
5262 : INTVAL (x) / 8);
5263 break;
5264
5265 case 'S':
5266 /* Same, except compute (64 - c) / 8 */
5267
 5268 if (GET_CODE (x) != CONST_INT
 5269 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
 5270 || (INTVAL (x) & 7) != 0)
 5271 output_operand_lossage ("invalid %%S value");
5272
5273 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5274 break;
5275
5276 case 't':
5277 {
5278 /* On Unicos/Mk systems: use a DEX expression if the symbol
5279 clashes with a register name. */
5280 int dex = unicosmk_need_dex (x);
5281 if (dex)
5282 fprintf (file, "DEX(%d)", dex);
5283 else
5284 output_addr_const (file, x);
5285 }
5286 break;
5287
5288 case 'C': case 'D': case 'c': case 'd':
5289 /* Write out comparison name. */
5290 {
5291 enum rtx_code c = GET_CODE (x);
5292
5293 if (!COMPARISON_P (x))
5294 output_operand_lossage ("invalid %%C value");
5295
5296 else if (code == 'D')
5297 c = reverse_condition (c);
5298 else if (code == 'c')
5299 c = swap_condition (c);
5300 else if (code == 'd')
5301 c = swap_condition (reverse_condition (c));
5302
5303 if (c == LEU)
5304 fprintf (file, "ule");
5305 else if (c == LTU)
5306 fprintf (file, "ult");
5307 else if (c == UNORDERED)
5308 fprintf (file, "un");
5309 else
5310 fprintf (file, "%s", GET_RTX_NAME (c));
5311 }
5312 break;
5313
5314 case 'E':
5315 /* Write the divide or modulus operator. */
5316 switch (GET_CODE (x))
5317 {
5318 case DIV:
5319 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5320 break;
5321 case UDIV:
5322 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5323 break;
5324 case MOD:
5325 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5326 break;
5327 case UMOD:
5328 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5329 break;
5330 default:
5331 output_operand_lossage ("invalid %%E value");
5332 break;
5333 }
5334 break;
5335
5336 case 'A':
5337 /* Write "_u" for unaligned access. */
5338 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5339 fprintf (file, "_u");
5340 break;
5341
5342 case 0:
5343 if (GET_CODE (x) == REG)
5344 fprintf (file, "%s", reg_names[REGNO (x)]);
5345 else if (GET_CODE (x) == MEM)
5346 output_address (XEXP (x, 0));
5347 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5348 {
5349 switch (XINT (XEXP (x, 0), 1))
5350 {
5351 case UNSPEC_DTPREL:
5352 case UNSPEC_TPREL:
5353 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5354 break;
5355 default:
5356 output_operand_lossage ("unknown relocation unspec");
5357 break;
5358 }
5359 }
5360 else
5361 output_addr_const (file, x);
5362 break;
5363
5364 default:
5365 output_operand_lossage ("invalid %%xn code");
5366 }
5367 }
5368
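/* Output the memory address ADDR to FILE.  LO_SUM addresses are printed
   with their !gprel / !gprellow (or TLS) relocation annotation.  */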
5369 void
5370 print_operand_address (FILE *file, rtx addr)
5371 {
5372 int basereg = 31;
5373 HOST_WIDE_INT offset = 0;
5374
5375 if (GET_CODE (addr) == AND)
5376 addr = XEXP (addr, 0);
5377
5378 if (GET_CODE (addr) == PLUS
5379 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5380 {
5381 offset = INTVAL (XEXP (addr, 1));
5382 addr = XEXP (addr, 0);
5383 }
5384
5385 if (GET_CODE (addr) == LO_SUM)
5386 {
5387 const char *reloc16, *reloclo;
5388 rtx op1 = XEXP (addr, 1);
5389
5390 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5391 {
5392 op1 = XEXP (op1, 0);
5393 switch (XINT (op1, 1))
5394 {
5395 case UNSPEC_DTPREL:
5396 reloc16 = NULL;
5397 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5398 break;
5399 case UNSPEC_TPREL:
5400 reloc16 = NULL;
5401 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5402 break;
5403 default:
5404 output_operand_lossage ("unknown relocation unspec");
5405 return;
5406 }
5407
5408 output_addr_const (file, XVECEXP (op1, 0, 0));
5409 }
5410 else
5411 {
5412 reloc16 = "gprel";
5413 reloclo = "gprellow";
5414 output_addr_const (file, op1);
5415 }
5416
5417 if (offset)
5418 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5419
5420 addr = XEXP (addr, 0);
5421 switch (GET_CODE (addr))
5422 {
5423 case REG:
5424 basereg = REGNO (addr);
5425 break;
5426
5427 case SUBREG:
5428 basereg = subreg_regno (addr);
5429 break;
5430
5431 default:
5432 gcc_unreachable ();
5433 }
5434
5435 fprintf (file, "($%d)\t\t!%s", basereg,
5436 (basereg == 29 ? reloc16 : reloclo));
5437 return;
5438 }
5439
5440 switch (GET_CODE (addr))
5441 {
5442 case REG:
5443 basereg = REGNO (addr);
5444 break;
5445
5446 case SUBREG:
5447 basereg = subreg_regno (addr);
5448 break;
5449
5450 case CONST_INT:
5451 offset = INTVAL (addr);
5452 break;
5453
5454 #if TARGET_ABI_OPEN_VMS
5455 case SYMBOL_REF:
5456 fprintf (file, "%s", XSTR (addr, 0));
5457 return;
5458
5459 case CONST:
5460 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5461 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5462 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5463 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5464 INTVAL (XEXP (XEXP (addr, 0), 1)));
5465 return;
5466
5467 #endif
5468 default:
5469 gcc_unreachable ();
5470 }
5471
5472 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5473 }
5474 \f
5475 /* Emit RTL insns to initialize the variable parts of a trampoline at
5476 TRAMP. FNADDR is an RTX for the address of the function's pure
5477 code. CXT is an RTX for the static chain value for the function.
5478
5479 The three offset parameters are for the individual template's
5480 layout. A JMPOFS < 0 indicates that the trampoline does not
5481 contain instructions at all.
5482
5483 We assume here that a function will be called many more times than
5484 its address is taken (e.g., it might be passed to qsort), so we
5485 take the trouble to initialize the "hint" field in the JMP insn.
5486 Note that the hint field is PC (new) + 4 * bits 13:0. */
5487
5488 void
5489 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5490 int fnofs, int cxtofs, int jmpofs)
5491 {
5492 rtx temp, temp1, addr;
5493 /* VMS really uses DImode pointers in memory at this point. */
5494 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5495
5496 #ifdef POINTERS_EXTEND_UNSIGNED
5497 fnaddr = convert_memory_address (mode, fnaddr);
5498 cxt = convert_memory_address (mode, cxt);
5499 #endif
5500
5501 /* Store function address and CXT. */
5502 addr = memory_address (mode, plus_constant (tramp, fnofs));
5503 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5504 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5505 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5506
5507 /* This has been disabled since the hint only has a 32k range, and in
5508 no existing OS is the stack within 32k of the text segment. */
5509 if (0 && jmpofs >= 0)
5510 {
5511 /* Compute hint value. */
5512 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5513 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5514 OPTAB_WIDEN);
5515 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5516 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5517 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5518 GEN_INT (0x3fff), 0);
5519
5520 /* Merge in the hint. */
5521 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5522 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5523 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5524 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5525 OPTAB_WIDEN);
5526 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5527 }
5528
5529 #ifdef ENABLE_EXECUTE_STACK
5530 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5531 0, VOIDmode, 1, tramp, Pmode);
5532 #endif
5533
5534 if (jmpofs >= 0)
5535 emit_insn (gen_imb ());
5536 }
5537 \f
5538 /* Determine where to put an argument to a function.
5539 Value is zero to push the argument on the stack,
5540 or a hard register in which to store the argument.
5541
5542 MODE is the argument's machine mode.
5543 TYPE is the data type of the argument (as a tree).
5544 This is null for libcalls where that information may
5545 not be available.
5546 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5547 the preceding args and about the function being called.
5548 NAMED is nonzero if this argument is a named parameter
5549 (otherwise it is an extra parameter matching an ellipsis).
5550
5551 On Alpha the first 6 words of args are normally in registers
5552 and the rest are pushed. */
5553
5554 rtx
5555 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5556 int named ATTRIBUTE_UNUSED)
5557 {
5558 int basereg;
5559 int num_args;
5560
5561 /* Don't get confused and pass small structures in FP registers. */
5562 if (type && AGGREGATE_TYPE_P (type))
5563 basereg = 16;
5564 else
5565 {
5566 #ifdef ENABLE_CHECKING
5567 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5568 values here. */
5569 gcc_assert (!COMPLEX_MODE_P (mode));
5570 #endif
5571
5572 /* Set up defaults for FP operands passed in FP registers, and
5573 integral operands passed in integer registers. */
5574 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5575 basereg = 32 + 16;
5576 else
5577 basereg = 16;
5578 }
5579
5580 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5581 the three platforms, so we can't avoid conditional compilation. */
5582 #if TARGET_ABI_OPEN_VMS
5583 {
5584 if (mode == VOIDmode)
5585 return alpha_arg_info_reg_val (cum);
5586
5587 num_args = cum.num_args;
5588 if (num_args >= 6
5589 || targetm.calls.must_pass_in_stack (mode, type))
5590 return NULL_RTX;
5591 }
5592 #elif TARGET_ABI_UNICOSMK
5593 {
5594 int size;
5595
5596 /* If this is the last argument, generate the call info word (CIW). */
5597 /* ??? We don't include the caller's line number in the CIW because
 5598 I don't know how to determine it if debug info is turned off. */
5599 if (mode == VOIDmode)
5600 {
5601 int i;
5602 HOST_WIDE_INT lo;
5603 HOST_WIDE_INT hi;
5604 rtx ciw;
5605
5606 lo = 0;
5607
5608 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5609 if (cum.reg_args_type[i])
5610 lo |= (1 << (7 - i));
5611
5612 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5613 lo |= 7;
5614 else
5615 lo |= cum.num_reg_words;
5616
5617 #if HOST_BITS_PER_WIDE_INT == 32
5618 hi = (cum.num_args << 20) | cum.num_arg_words;
5619 #else
5620 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5621 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5622 hi = 0;
5623 #endif
5624 ciw = immed_double_const (lo, hi, DImode);
5625
5626 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5627 UNSPEC_UMK_LOAD_CIW);
5628 }
5629
5630 size = ALPHA_ARG_SIZE (mode, type, named);
5631 num_args = cum.num_reg_words;
5632 if (cum.force_stack
5633 || cum.num_reg_words + size > 6
5634 || targetm.calls.must_pass_in_stack (mode, type))
5635 return NULL_RTX;
5636 else if (type && TYPE_MODE (type) == BLKmode)
5637 {
5638 rtx reg1, reg2;
5639
5640 reg1 = gen_rtx_REG (DImode, num_args + 16);
5641 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5642
5643 /* The argument fits in two registers. Note that we still need to
5644 reserve a register for empty structures. */
5645 if (size == 0)
5646 return NULL_RTX;
5647 else if (size == 1)
5648 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5649 else
5650 {
5651 reg2 = gen_rtx_REG (DImode, num_args + 17);
5652 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5653 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5654 }
5655 }
5656 }
5657 #elif TARGET_ABI_OSF
5658 {
5659 if (cum >= 6)
5660 return NULL_RTX;
5661 num_args = cum;
5662
5663 /* VOID is passed as a special flag for "last argument". */
5664 if (type == void_type_node)
5665 basereg = 16;
5666 else if (targetm.calls.must_pass_in_stack (mode, type))
5667 return NULL_RTX;
5668 }
5669 #else
5670 #error Unhandled ABI
5671 #endif
5672
5673 return gen_rtx_REG (mode, num_args + basereg);
5674 }
5675
5676 static int
5677 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5678 enum machine_mode mode ATTRIBUTE_UNUSED,
5679 tree type ATTRIBUTE_UNUSED,
5680 bool named ATTRIBUTE_UNUSED)
5681 {
5682 int words = 0;
5683
5684 #if TARGET_ABI_OPEN_VMS
5685 if (cum->num_args < 6
5686 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5687 words = 6 - cum->num_args;
5688 #elif TARGET_ABI_UNICOSMK
5689 /* Never any split arguments. */
5690 #elif TARGET_ABI_OSF
5691 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5692 words = 6 - *cum;
5693 #else
5694 #error Unhandled ABI
5695 #endif
5696
5697 return words * UNITS_PER_WORD;
5698 }
5699
5700
5701 /* Return true if TYPE must be returned in memory, instead of in registers. */
5702
5703 static bool
5704 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5705 {
5706 enum machine_mode mode = VOIDmode;
5707 int size;
5708
5709 if (type)
5710 {
5711 mode = TYPE_MODE (type);
5712
5713 /* All aggregates are returned in memory. */
5714 if (AGGREGATE_TYPE_P (type))
5715 return true;
5716 }
5717
5718 size = GET_MODE_SIZE (mode);
5719 switch (GET_MODE_CLASS (mode))
5720 {
5721 case MODE_VECTOR_FLOAT:
5722 /* Pass all float vectors in memory, like an aggregate. */
5723 return true;
5724
5725 case MODE_COMPLEX_FLOAT:
5726 /* We judge complex floats on the size of their element,
5727 not the size of the whole type. */
5728 size = GET_MODE_UNIT_SIZE (mode);
5729 break;
5730
5731 case MODE_INT:
5732 case MODE_FLOAT:
5733 case MODE_COMPLEX_INT:
5734 case MODE_VECTOR_INT:
5735 break;
5736
5737 default:
5738 /* ??? We get called on all sorts of random stuff from
5739 aggregate_value_p. We must return something, but it's not
5740 clear what's safe to return. Pretend it's a struct I
5741 guess. */
5742 return true;
5743 }
5744
5745 /* Otherwise types must fit in one register. */
5746 return size > UNITS_PER_WORD;
5747 }
5748
5749 /* Return true if TYPE should be passed by invisible reference. */
5750
5751 static bool
5752 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5753 enum machine_mode mode,
5754 tree type ATTRIBUTE_UNUSED,
5755 bool named ATTRIBUTE_UNUSED)
5756 {
5757 return mode == TFmode || mode == TCmode;
5758 }
5759
5760 /* Define how to find the value returned by a function. VALTYPE is the
5761 data type of the value (as a tree). If the precise function being
5762 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5763 MODE is set instead of VALTYPE for libcalls.
5764
5765 On Alpha the value is found in $0 for integer functions and
5766 $f0 for floating-point functions. */
5767
5768 rtx
5769 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5770 enum machine_mode mode)
5771 {
5772 unsigned int regnum, dummy;
5773 enum mode_class class;
5774
5775 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5776
5777 if (valtype)
5778 mode = TYPE_MODE (valtype);
5779
5780 class = GET_MODE_CLASS (mode);
5781 switch (class)
5782 {
5783 case MODE_INT:
5784 PROMOTE_MODE (mode, dummy, valtype);
5785 /* FALLTHRU */
5786
5787 case MODE_COMPLEX_INT:
5788 case MODE_VECTOR_INT:
5789 regnum = 0;
5790 break;
5791
5792 case MODE_FLOAT:
5793 regnum = 32;
5794 break;
5795
5796 case MODE_COMPLEX_FLOAT:
5797 {
5798 enum machine_mode cmode = GET_MODE_INNER (mode);
5799
5800 return gen_rtx_PARALLEL
5801 (VOIDmode,
5802 gen_rtvec (2,
5803 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5804 const0_rtx),
5805 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5806 GEN_INT (GET_MODE_SIZE (cmode)))));
5807 }
5808
5809 default:
5810 gcc_unreachable ();
5811 }
5812
5813 return gen_rtx_REG (mode, regnum);
5814 }
5815
5816 /* TCmode complex values are passed by invisible reference. We
5817 should not split these values. */
5818
5819 static bool
5820 alpha_split_complex_arg (tree type)
5821 {
5822 return TYPE_MODE (type) != TCmode;
5823 }
5824
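/* Build the va_list type for the current ABI.  On OSF/1 it is a record
   containing a __base pointer to the argument save area and an integer
   __offset into it; VMS and Unicos/Mk use a plain pointer.  */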
5825 static tree
5826 alpha_build_builtin_va_list (void)
5827 {
5828 tree base, ofs, space, record, type_decl;
5829
5830 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5831 return ptr_type_node;
5832
5833 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5834 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5835 TREE_CHAIN (record) = type_decl;
5836 TYPE_NAME (record) = type_decl;
5837
5838 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5839
5840 /* Dummy field to prevent alignment warnings. */
5841 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5842 DECL_FIELD_CONTEXT (space) = record;
5843 DECL_ARTIFICIAL (space) = 1;
5844 DECL_IGNORED_P (space) = 1;
5845
5846 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5847 integer_type_node);
5848 DECL_FIELD_CONTEXT (ofs) = record;
5849 TREE_CHAIN (ofs) = space;
5850
5851 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5852 ptr_type_node);
5853 DECL_FIELD_CONTEXT (base) = record;
5854 TREE_CHAIN (base) = ofs;
5855
5856 TYPE_FIELDS (record) = base;
5857 layout_type (record);
5858
5859 va_list_gpr_counter_field = ofs;
5860 return record;
5861 }
5862
5863 #if TARGET_ABI_OSF
5864 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5865 and constant additions. */
5866
5867 static tree
5868 va_list_skip_additions (tree lhs)
5869 {
5870 tree rhs, stmt;
5871
5872 if (TREE_CODE (lhs) != SSA_NAME)
5873 return lhs;
5874
5875 for (;;)
5876 {
5877 stmt = SSA_NAME_DEF_STMT (lhs);
5878
5879 if (TREE_CODE (stmt) == PHI_NODE)
5880 return stmt;
5881
5882 if (TREE_CODE (stmt) != MODIFY_EXPR
5883 || TREE_OPERAND (stmt, 0) != lhs)
5884 return lhs;
5885
5886 rhs = TREE_OPERAND (stmt, 1);
5887 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5888 rhs = TREE_OPERAND (rhs, 0);
5889
5890 if ((TREE_CODE (rhs) != NOP_EXPR
5891 && TREE_CODE (rhs) != CONVERT_EXPR
5892 && (TREE_CODE (rhs) != PLUS_EXPR
5893 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5894 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5895 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5896 return rhs;
5897
5898 lhs = TREE_OPERAND (rhs, 0);
5899 }
5900 }
5901
5902 /* Check if LHS = RHS statement is
5903 LHS = *(ap.__base + ap.__offset + cst)
5904 or
5905 LHS = *(ap.__base
5906 + ((ap.__offset + cst <= 47)
5907 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5908 If the former, indicate that GPR registers are needed,
5909 if the latter, indicate that FPR registers are needed.
5910 On alpha, cfun->va_list_gpr_size is used as size of the needed
5911 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if
5912 GPR registers are needed and bit 1 set if FPR registers are needed.
5913 Return true if va_list references should not be scanned for the current
5914 statement. */
5915
5916 static bool
5917 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5918 {
5919 tree base, offset, arg1, arg2;
5920 int offset_arg = 1;
5921
5922 if (TREE_CODE (rhs) != INDIRECT_REF
5923 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5924 return false;
5925
5926 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5927 if (lhs == NULL_TREE
5928 || TREE_CODE (lhs) != PLUS_EXPR)
5929 return false;
5930
5931 base = TREE_OPERAND (lhs, 0);
5932 if (TREE_CODE (base) == SSA_NAME)
5933 base = va_list_skip_additions (base);
5934
5935 if (TREE_CODE (base) != COMPONENT_REF
5936 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5937 {
5938 base = TREE_OPERAND (lhs, 0);
5939 if (TREE_CODE (base) == SSA_NAME)
5940 base = va_list_skip_additions (base);
5941
5942 if (TREE_CODE (base) != COMPONENT_REF
5943 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5944 return false;
5945
5946 offset_arg = 0;
5947 }
5948
5949 base = get_base_address (base);
5950 if (TREE_CODE (base) != VAR_DECL
5951 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5952 return false;
5953
5954 offset = TREE_OPERAND (lhs, offset_arg);
5955 if (TREE_CODE (offset) == SSA_NAME)
5956 offset = va_list_skip_additions (offset);
5957
5958 if (TREE_CODE (offset) == PHI_NODE)
5959 {
5960 HOST_WIDE_INT sub;
5961
5962 if (PHI_NUM_ARGS (offset) != 2)
5963 goto escapes;
5964
5965 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5966 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5967 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5968 {
5969 tree tem = arg1;
5970 arg1 = arg2;
5971 arg2 = tem;
5972
5973 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5974 goto escapes;
5975 }
5976 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5977 goto escapes;
5978
5979 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5980 if (TREE_CODE (arg2) == MINUS_EXPR)
5981 sub = -sub;
5982 if (sub < -48 || sub > -32)
5983 goto escapes;
5984
5985 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5986 if (arg1 != arg2)
5987 goto escapes;
5988
5989 if (TREE_CODE (arg1) == SSA_NAME)
5990 arg1 = va_list_skip_additions (arg1);
5991
5992 if (TREE_CODE (arg1) != COMPONENT_REF
5993 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5994 || get_base_address (arg1) != base)
5995 goto escapes;
5996
5997 /* Need floating point regs. */
5998 cfun->va_list_fpr_size |= 2;
5999 }
6000 else if (TREE_CODE (offset) != COMPONENT_REF
6001 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6002 || get_base_address (offset) != base)
6003 goto escapes;
6004 else
6005 /* Need general regs. */
6006 cfun->va_list_fpr_size |= 1;
6007 return false;
6008
6009 escapes:
6010 si->va_list_escapes = true;
6011 return false;
6012 }
6013 #endif
6014
6015 /* Perform any actions needed for a function that is receiving a
6016 variable number of arguments. */
6017
6018 static void
6019 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6020 tree type, int *pretend_size, int no_rtl)
6021 {
6022 CUMULATIVE_ARGS cum = *pcum;
6023
6024 /* Skip the current argument. */
6025 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6026
6027 #if TARGET_ABI_UNICOSMK
6028 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6029 arguments on the stack. Unfortunately, it doesn't always store the first
6030 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6031 with stdargs as we always have at least one named argument there. */
6032 if (cum.num_reg_words < 6)
6033 {
6034 if (!no_rtl)
6035 {
6036 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6037 emit_insn (gen_arg_home_umk ());
6038 }
6039 *pretend_size = 0;
6040 }
6041 #elif TARGET_ABI_OPEN_VMS
6042 /* For VMS, we allocate space for all 6 arg registers plus a count.
6043
6044 However, if NO registers need to be saved, don't allocate any space.
6045 This is not only because we won't need the space, but because AP
6046 includes the current_pretend_args_size and we don't want to mess up
6047 any ap-relative addresses already made. */
6048 if (cum.num_args < 6)
6049 {
6050 if (!no_rtl)
6051 {
6052 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6053 emit_insn (gen_arg_home ());
6054 }
6055 *pretend_size = 7 * UNITS_PER_WORD;
6056 }
6057 #else
6058 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6059 only push those that are remaining. However, if NO registers need to
6060 be saved, don't allocate any space. This is not only because we won't
6061 need the space, but because AP includes the current_pretend_args_size
6062 and we don't want to mess up any ap-relative addresses already made.
6063
6064 If we are not to use the floating-point registers, save the integer
6065 registers where we would put the floating-point registers. This is
6066 not the most efficient way to implement varargs with just one register
6067 class, but it isn't worth doing anything more efficient in this rare
6068 case. */
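  /* Concretely, with CUM named argument words already consumed, the FP
     arg registers that may still be needed are dumped starting at offset
     CUM*8 from the incoming args pointer and the integer arg registers
     starting at offset (CUM+6)*8, i.e. FP regs in the first 48 bytes of
     the 96-byte block and integer regs in the second 48, which is the
     layout alpha_va_start below relies on.  */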
6069 if (cum >= 6)
6070 return;
6071
6072 if (!no_rtl)
6073 {
6074 int count, set = get_varargs_alias_set ();
6075 rtx tmp;
6076
6077 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6078 if (count > 6 - cum)
6079 count = 6 - cum;
6080
6081 /* Detect whether integer registers or floating-point registers
6082 are needed by the detected va_arg statements. See above for
6083 how these values are computed. Note that the "escape" value
6084 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6085 these bits set. */
6086 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6087
6088 if (cfun->va_list_fpr_size & 1)
6089 {
6090 tmp = gen_rtx_MEM (BLKmode,
6091 plus_constant (virtual_incoming_args_rtx,
6092 (cum + 6) * UNITS_PER_WORD));
6093 set_mem_alias_set (tmp, set);
6094 move_block_from_reg (16 + cum, tmp, count);
6095 }
6096
6097 if (cfun->va_list_fpr_size & 2)
6098 {
6099 tmp = gen_rtx_MEM (BLKmode,
6100 plus_constant (virtual_incoming_args_rtx,
6101 cum * UNITS_PER_WORD));
6102 set_mem_alias_set (tmp, set);
6103 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6104 }
6105 }
6106 *pretend_size = 12 * UNITS_PER_WORD;
6107 #endif
6108 }
6109
6110 void
6111 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6112 {
6113 HOST_WIDE_INT offset;
6114 tree t, offset_field, base_field;
6115
6116 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6117 return;
6118
6119 if (TARGET_ABI_UNICOSMK)
6120 std_expand_builtin_va_start (valist, nextarg);
6121
6122 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6123 up by 48, storing fp arg registers in the first 48 bytes, and the
6124 integer arg registers in the next 48 bytes. This is only done,
6125 however, if any integer registers need to be stored.
6126
6127 If no integer registers need be stored, then we must subtract 48
6128 in order to account for the integer arg registers which are counted
6129 in argsize above, but which are not actually stored on the stack.
6130 Must further be careful here about structures straddling the last
6131 integer argument register; that futzes with pretend_args_size,
6132 which changes the meaning of AP. */
6133
6134 if (NUM_ARGS < 6)
6135 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6136 else
6137 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6138
6139 if (TARGET_ABI_OPEN_VMS)
6140 {
6141 nextarg = plus_constant (nextarg, offset);
6142 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6143 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
6144 make_tree (ptr_type_node, nextarg));
6145 TREE_SIDE_EFFECTS (t) = 1;
6146
6147 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6148 }
6149 else
6150 {
6151 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6152 offset_field = TREE_CHAIN (base_field);
6153
6154 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6155 valist, base_field, NULL_TREE);
6156 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6157 valist, offset_field, NULL_TREE);
6158
6159 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6160 t = build (PLUS_EXPR, ptr_type_node, t,
6161 build_int_cst (NULL_TREE, offset));
6162 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6163 TREE_SIDE_EFFECTS (t) = 1;
6164 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6165
6166 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6167 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6168 TREE_SIDE_EFFECTS (t) = 1;
6169 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6170 }
6171 }
6172
6173 static tree
6174 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6175 {
6176 tree type_size, ptr_type, addend, t, addr, internal_post;
6177
6178 /* If the type could not be passed in registers, skip the block
6179 reserved for the registers. */
6180 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6181 {
6182 t = build_int_cst (TREE_TYPE (offset), 6*8);
6183 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
6184 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
6185 gimplify_and_add (t, pre_p);
6186 }
6187
6188 addend = offset;
6189 ptr_type = build_pointer_type (type);
6190
6191 if (TREE_CODE (type) == COMPLEX_TYPE)
6192 {
6193 tree real_part, imag_part, real_temp;
6194
6195 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6196 offset, pre_p);
6197
6198 /* Copy the value into a new temporary, lest the formal temporary
6199 be reused out from under us. */
6200 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6201
6202 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6203 offset, pre_p);
6204
6205 return build (COMPLEX_EXPR, type, real_temp, imag_part);
6206 }
6207 else if (TREE_CODE (type) == REAL_TYPE)
6208 {
6209 tree fpaddend, cond, fourtyeight;
6210
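      /* FP argument registers live in the 48 bytes below __base, so while
	 the offset is still inside that region (addend < 48) the address
	 is biased down by 48; e.g. an offset of 8 loads the double from
	 __base - 40.  */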
6211 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6212 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
6213 addend, fourtyeight));
6214 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
6215 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
6216 fpaddend, addend));
6217 }
6218
6219 /* Build the final address and force that value into a temporary. */
6220 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6221 fold_convert (ptr_type, addend));
6222 internal_post = NULL;
6223 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6224 append_to_statement_list (internal_post, pre_p);
6225
6226 /* Update the offset field. */
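  /* The increment is the argument's size rounded up to a multiple of
     8 bytes; a 12-byte struct, for instance, bumps the offset by 16.  */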
6227 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6228 if (type_size == NULL || TREE_OVERFLOW (type_size))
6229 t = size_zero_node;
6230 else
6231 {
6232 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6233 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6234 t = size_binop (MULT_EXPR, t, size_int (8));
6235 }
6236 t = fold_convert (TREE_TYPE (offset), t);
6237 t = build (MODIFY_EXPR, void_type_node, offset,
6238 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6239 gimplify_and_add (t, pre_p);
6240
6241 return build_va_arg_indirect_ref (addr);
6242 }
6243
6244 static tree
6245 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6246 {
6247 tree offset_field, base_field, offset, base, t, r;
6248 bool indirect;
6249
6250 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6251 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6252
6253 base_field = TYPE_FIELDS (va_list_type_node);
6254 offset_field = TREE_CHAIN (base_field);
6255 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
6256 valist, base_field, NULL_TREE);
6257 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
6258 valist, offset_field, NULL_TREE);
6259
6260 /* Pull the fields of the structure out into temporaries. Since we never
6261 modify the base field, we can use a formal temporary. Sign-extend the
6262 offset field so that it's the proper width for pointer arithmetic. */
6263 base = get_formal_tmp_var (base_field, pre_p);
6264
6265 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6266 offset = get_initialized_tmp_var (t, pre_p, NULL);
6267
6268 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6269 if (indirect)
6270 type = build_pointer_type (type);
6271
6272 /* Find the value. Note that this will be a stable indirection, or
6273 a composite of stable indirections in the case of complex. */
6274 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6275
6276 /* Stuff the offset temporary back into its field. */
6277 t = build (MODIFY_EXPR, void_type_node, offset_field,
6278 fold_convert (TREE_TYPE (offset_field), offset));
6279 gimplify_and_add (t, pre_p);
6280
6281 if (indirect)
6282 r = build_va_arg_indirect_ref (r);
6283
6284 return r;
6285 }
6286 \f
6287 /* Builtins. */
6288
6289 enum alpha_builtin
6290 {
6291 ALPHA_BUILTIN_CMPBGE,
6292 ALPHA_BUILTIN_EXTBL,
6293 ALPHA_BUILTIN_EXTWL,
6294 ALPHA_BUILTIN_EXTLL,
6295 ALPHA_BUILTIN_EXTQL,
6296 ALPHA_BUILTIN_EXTWH,
6297 ALPHA_BUILTIN_EXTLH,
6298 ALPHA_BUILTIN_EXTQH,
6299 ALPHA_BUILTIN_INSBL,
6300 ALPHA_BUILTIN_INSWL,
6301 ALPHA_BUILTIN_INSLL,
6302 ALPHA_BUILTIN_INSQL,
6303 ALPHA_BUILTIN_INSWH,
6304 ALPHA_BUILTIN_INSLH,
6305 ALPHA_BUILTIN_INSQH,
6306 ALPHA_BUILTIN_MSKBL,
6307 ALPHA_BUILTIN_MSKWL,
6308 ALPHA_BUILTIN_MSKLL,
6309 ALPHA_BUILTIN_MSKQL,
6310 ALPHA_BUILTIN_MSKWH,
6311 ALPHA_BUILTIN_MSKLH,
6312 ALPHA_BUILTIN_MSKQH,
6313 ALPHA_BUILTIN_UMULH,
6314 ALPHA_BUILTIN_ZAP,
6315 ALPHA_BUILTIN_ZAPNOT,
6316 ALPHA_BUILTIN_AMASK,
6317 ALPHA_BUILTIN_IMPLVER,
6318 ALPHA_BUILTIN_RPCC,
6319 ALPHA_BUILTIN_THREAD_POINTER,
6320 ALPHA_BUILTIN_SET_THREAD_POINTER,
6321
6322 /* TARGET_MAX */
6323 ALPHA_BUILTIN_MINUB8,
6324 ALPHA_BUILTIN_MINSB8,
6325 ALPHA_BUILTIN_MINUW4,
6326 ALPHA_BUILTIN_MINSW4,
6327 ALPHA_BUILTIN_MAXUB8,
6328 ALPHA_BUILTIN_MAXSB8,
6329 ALPHA_BUILTIN_MAXUW4,
6330 ALPHA_BUILTIN_MAXSW4,
6331 ALPHA_BUILTIN_PERR,
6332 ALPHA_BUILTIN_PKLB,
6333 ALPHA_BUILTIN_PKWB,
6334 ALPHA_BUILTIN_UNPKBL,
6335 ALPHA_BUILTIN_UNPKBW,
6336
6337 /* TARGET_CIX */
6338 ALPHA_BUILTIN_CTTZ,
6339 ALPHA_BUILTIN_CTLZ,
6340 ALPHA_BUILTIN_CTPOP,
6341
6342 ALPHA_BUILTIN_max
6343 };
6344
6345 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6346 CODE_FOR_builtin_cmpbge,
6347 CODE_FOR_builtin_extbl,
6348 CODE_FOR_builtin_extwl,
6349 CODE_FOR_builtin_extll,
6350 CODE_FOR_builtin_extql,
6351 CODE_FOR_builtin_extwh,
6352 CODE_FOR_builtin_extlh,
6353 CODE_FOR_builtin_extqh,
6354 CODE_FOR_builtin_insbl,
6355 CODE_FOR_builtin_inswl,
6356 CODE_FOR_builtin_insll,
6357 CODE_FOR_builtin_insql,
6358 CODE_FOR_builtin_inswh,
6359 CODE_FOR_builtin_inslh,
6360 CODE_FOR_builtin_insqh,
6361 CODE_FOR_builtin_mskbl,
6362 CODE_FOR_builtin_mskwl,
6363 CODE_FOR_builtin_mskll,
6364 CODE_FOR_builtin_mskql,
6365 CODE_FOR_builtin_mskwh,
6366 CODE_FOR_builtin_msklh,
6367 CODE_FOR_builtin_mskqh,
6368 CODE_FOR_umuldi3_highpart,
6369 CODE_FOR_builtin_zap,
6370 CODE_FOR_builtin_zapnot,
6371 CODE_FOR_builtin_amask,
6372 CODE_FOR_builtin_implver,
6373 CODE_FOR_builtin_rpcc,
6374 CODE_FOR_load_tp,
6375 CODE_FOR_set_tp,
6376
6377 /* TARGET_MAX */
6378 CODE_FOR_builtin_minub8,
6379 CODE_FOR_builtin_minsb8,
6380 CODE_FOR_builtin_minuw4,
6381 CODE_FOR_builtin_minsw4,
6382 CODE_FOR_builtin_maxub8,
6383 CODE_FOR_builtin_maxsb8,
6384 CODE_FOR_builtin_maxuw4,
6385 CODE_FOR_builtin_maxsw4,
6386 CODE_FOR_builtin_perr,
6387 CODE_FOR_builtin_pklb,
6388 CODE_FOR_builtin_pkwb,
6389 CODE_FOR_builtin_unpkbl,
6390 CODE_FOR_builtin_unpkbw,
6391
6392 /* TARGET_CIX */
6393 CODE_FOR_ctzdi2,
6394 CODE_FOR_clzdi2,
6395 CODE_FOR_popcountdi2
6396 };
6397
6398 struct alpha_builtin_def
6399 {
6400 const char *name;
6401 enum alpha_builtin code;
6402 unsigned int target_mask;
6403 bool is_const;
6404 };
6405
6406 static struct alpha_builtin_def const zero_arg_builtins[] = {
6407 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6408 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6409 };
6410
6411 static struct alpha_builtin_def const one_arg_builtins[] = {
6412 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6413 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6414 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6415 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6416 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6417 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6418 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6419 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6420 };
6421
6422 static struct alpha_builtin_def const two_arg_builtins[] = {
6423 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6424 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6425 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6426 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6427 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6428 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6429 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6430 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6431 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6432 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6433 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6434 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6435 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6436 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6437 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6438 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6439 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6440 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6441 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6442 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6443 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6444 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6445 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6446 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6447 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6448 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6449 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6450 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6451 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6452 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6453 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6454 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6455 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6456 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6457 };
6458
6459 static GTY(()) tree alpha_v8qi_u;
6460 static GTY(()) tree alpha_v8qi_s;
6461 static GTY(()) tree alpha_v4hi_u;
6462 static GTY(()) tree alpha_v4hi_s;
6463
6464 static void
6465 alpha_init_builtins (void)
6466 {
6467 const struct alpha_builtin_def *p;
6468 tree ftype, attrs[2];
6469 size_t i;
6470
6471 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
6472 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
6473
6474 ftype = build_function_type (long_integer_type_node, void_list_node);
6475
6476 p = zero_arg_builtins;
6477 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
6478 if ((target_flags & p->target_mask) == p->target_mask)
6479 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6480 NULL, attrs[p->is_const]);
6481
6482 ftype = build_function_type_list (long_integer_type_node,
6483 long_integer_type_node, NULL_TREE);
6484
6485 p = one_arg_builtins;
6486 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
6487 if ((target_flags & p->target_mask) == p->target_mask)
6488 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6489 NULL, attrs[p->is_const]);
6490
6491 ftype = build_function_type_list (long_integer_type_node,
6492 long_integer_type_node,
6493 long_integer_type_node, NULL_TREE);
6494
6495 p = two_arg_builtins;
6496 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6497 if ((target_flags & p->target_mask) == p->target_mask)
6498 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6499 NULL, attrs[p->is_const]);
6500
6501 ftype = build_function_type (ptr_type_node, void_list_node);
6502 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6503 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6504 NULL, attrs[0]);
6505
6506 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6507 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6508 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6509 NULL, attrs[0]);
6510
6511 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6512 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6513 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6514 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6515 }
6516
6517 /* Expand an expression EXP that calls a built-in function,
6518 with result going to TARGET if that's convenient
6519 (and in mode MODE if that's convenient).
6520 SUBTARGET may be used as the target for computing one of EXP's operands.
6521 IGNORE is nonzero if the value is to be ignored. */
6522
6523 static rtx
6524 alpha_expand_builtin (tree exp, rtx target,
6525 rtx subtarget ATTRIBUTE_UNUSED,
6526 enum machine_mode mode ATTRIBUTE_UNUSED,
6527 int ignore ATTRIBUTE_UNUSED)
6528 {
6529 #define MAX_ARGS 2
6530
6531 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6532 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6533 tree arglist = TREE_OPERAND (exp, 1);
6534 enum insn_code icode;
6535 rtx op[MAX_ARGS], pat;
6536 int arity;
6537 bool nonvoid;
6538
6539 if (fcode >= ALPHA_BUILTIN_max)
6540 internal_error ("bad builtin fcode");
6541 icode = code_for_builtin[fcode];
6542 if (icode == 0)
6543 internal_error ("bad builtin fcode");
6544
6545 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6546
6547 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6548 arglist;
6549 arglist = TREE_CHAIN (arglist), arity++)
6550 {
6551 const struct insn_operand_data *insn_op;
6552
6553 tree arg = TREE_VALUE (arglist);
6554 if (arg == error_mark_node)
6555 return NULL_RTX;
6556 if (arity >= MAX_ARGS)
6557 return NULL_RTX;
6558
6559 insn_op = &insn_data[icode].operand[arity + nonvoid];
6560
6561 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6562
6563 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6564 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6565 }
6566
6567 if (nonvoid)
6568 {
6569 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6570 if (!target
6571 || GET_MODE (target) != tmode
6572 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6573 target = gen_reg_rtx (tmode);
6574 }
6575
6576 switch (arity)
6577 {
6578 case 0:
6579 pat = GEN_FCN (icode) (target);
6580 break;
6581 case 1:
6582 if (nonvoid)
6583 pat = GEN_FCN (icode) (target, op[0]);
6584 else
6585 pat = GEN_FCN (icode) (op[0]);
6586 break;
6587 case 2:
6588 pat = GEN_FCN (icode) (target, op[0], op[1]);
6589 break;
6590 default:
6591 gcc_unreachable ();
6592 }
6593 if (!pat)
6594 return NULL_RTX;
6595 emit_insn (pat);
6596
6597 if (nonvoid)
6598 return target;
6599 else
6600 return const0_rtx;
6601 }
6602
6603
6604 /* Several bits below assume HWI >= 64 bits. This should be enforced
6605 by config.gcc. */
6606 #if HOST_BITS_PER_WIDE_INT < 64
6607 # error "HOST_WIDE_INT too small"
6608 #endif
6609
6610 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6611 with an 8 bit output vector. OPINT contains the integer operands; bit N
6612 of OP_CONST is set if OPINT[N] is valid. */
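/* For instance, __builtin_alpha_cmpbge (0, x) has result bit I set
   exactly when byte I of X is zero, which is how code typically scans
   a word for a zero byte.  */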
6613
6614 static tree
6615 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6616 {
6617 if (op_const == 3)
6618 {
6619 int i, val;
6620 for (i = 0, val = 0; i < 8; ++i)
6621 {
6622 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6623 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6624 if (c0 >= c1)
6625 val |= 1 << i;
6626 }
6627 return build_int_cst (long_integer_type_node, val);
6628 }
6629 else if (op_const == 2 && opint[1] == 0)
6630 return build_int_cst (long_integer_type_node, 0xff);
6631 return NULL;
6632 }
6633
6634 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6635 specialized form of an AND operation. Other byte manipulation instructions
6636 are defined in terms of this instruction, so this is also used as a
6637 subroutine for other builtins.
6638
6639 OP contains the tree operands; OPINT contains the extracted integer values.
6640 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6641 OPINT may be considered. */
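/* For instance, __builtin_alpha_zapnot (0x1122334455667788, 0x0f)
   folds to 0x55667788 (mask bits 0-3 keep the four low bytes), while
   __builtin_alpha_zap with the same arguments folds to
   0x1122334400000000.  */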
6642
6643 static tree
6644 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6645 long op_const)
6646 {
6647 if (op_const & 2)
6648 {
6649 unsigned HOST_WIDE_INT mask = 0;
6650 int i;
6651
6652 for (i = 0; i < 8; ++i)
6653 if ((opint[1] >> i) & 1)
6654 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6655
6656 if (op_const & 1)
6657 return build_int_cst (long_integer_type_node, opint[0] & mask);
6658
6659 if (op)
6660 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6661 build_int_cst (long_integer_type_node, mask)));
6662 }
6663 else if ((op_const & 1) && opint[0] == 0)
6664 return build_int_cst (long_integer_type_node, 0);
6665 return NULL;
6666 }
6667
6668 /* Fold the builtins for the EXT family of instructions. */
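/* For instance, on the usual little-endian layout
   __builtin_alpha_extbl (0x1122334455667788, 2) folds to 0x66: byte 2
   is shifted down and everything but the low byte is masked off.  */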
6669
6670 static tree
6671 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6672 long op_const, unsigned HOST_WIDE_INT bytemask,
6673 bool is_high)
6674 {
6675 long zap_const = 2;
6676 tree *zap_op = NULL;
6677
6678 if (op_const & 2)
6679 {
6680 unsigned HOST_WIDE_INT loc;
6681
6682 loc = opint[1] & 7;
6683 if (BYTES_BIG_ENDIAN)
6684 loc ^= 7;
6685 loc *= 8;
6686
6687 if (loc != 0)
6688 {
6689 if (op_const & 1)
6690 {
6691 unsigned HOST_WIDE_INT temp = opint[0];
6692 if (is_high)
6693 temp <<= loc;
6694 else
6695 temp >>= loc;
6696 opint[0] = temp;
6697 zap_const = 3;
6698 }
6699 }
6700 else
6701 zap_op = op;
6702 }
6703
6704 opint[1] = bytemask;
6705 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6706 }
6707
6708 /* Fold the builtins for the INS family of instructions. */
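/* For instance, on the little-endian layout
   __builtin_alpha_insbl (0xab, 3) folds to 0xab000000: the low byte of
   the first operand is shifted to byte position 3 and all other bytes
   are cleared.  */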
6709
6710 static tree
6711 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6712 long op_const, unsigned HOST_WIDE_INT bytemask,
6713 bool is_high)
6714 {
6715 if ((op_const & 1) && opint[0] == 0)
6716 return build_int_cst (long_integer_type_node, 0);
6717
6718 if (op_const & 2)
6719 {
6720 unsigned HOST_WIDE_INT temp, loc, byteloc;
6721 tree *zap_op = NULL;
6722
6723 loc = opint[1] & 7;
6724 if (BYTES_BIG_ENDIAN)
6725 loc ^= 7;
6726 bytemask <<= loc;
6727
6728 temp = opint[0];
6729 if (is_high)
6730 {
6731 byteloc = (64 - (loc * 8)) & 0x3f;
6732 if (byteloc == 0)
6733 zap_op = op;
6734 else
6735 temp >>= byteloc;
6736 bytemask >>= 8;
6737 }
6738 else
6739 {
6740 byteloc = loc * 8;
6741 if (byteloc == 0)
6742 zap_op = op;
6743 else
6744 temp <<= byteloc;
6745 }
6746
6747 opint[0] = temp;
6748 opint[1] = bytemask;
6749 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6750 }
6751
6752 return NULL;
6753 }
6754
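/* Fold the builtins for the MSK family of instructions, which clear
   the selected bytes; e.g. __builtin_alpha_mskbl (x, 0) folds to X
   with its low byte zeroed.  */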
6755 static tree
6756 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6757 long op_const, unsigned HOST_WIDE_INT bytemask,
6758 bool is_high)
6759 {
6760 if (op_const & 2)
6761 {
6762 unsigned HOST_WIDE_INT loc;
6763
6764 loc = opint[1] & 7;
6765 if (BYTES_BIG_ENDIAN)
6766 loc ^= 7;
6767 bytemask <<= loc;
6768
6769 if (is_high)
6770 bytemask >>= 8;
6771
6772 opint[1] = bytemask ^ 0xff;
6773 }
6774
6775 return alpha_fold_builtin_zapnot (op, opint, op_const);
6776 }
6777
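/* Fold the builtin for the UMULH instruction, the high 64 bits of an
   unsigned 128-bit product; e.g. __builtin_alpha_umulh (1UL << 32,
   1UL << 32) folds to 1.  */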
6778 static tree
6779 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6780 {
6781 switch (op_const)
6782 {
6783 case 3:
6784 {
6785 unsigned HOST_WIDE_INT l;
6786 HOST_WIDE_INT h;
6787
6788 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6789
6790 #if HOST_BITS_PER_WIDE_INT > 64
6791 # error fixme
6792 #endif
6793
6794 return build_int_cst (long_integer_type_node, h);
6795 }
6796
6797 case 1:
6798 opint[1] = opint[0];
6799 /* FALLTHRU */
6800 case 2:
6801 /* Note that (X*1) >> 64 == 0. */
6802 if (opint[1] == 0 || opint[1] == 1)
6803 return build_int_cst (long_integer_type_node, 0);
6804 break;
6805 }
6806 return NULL;
6807 }
6808
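/* Fold one of the MAX-extension vector min/max builtins by
   reinterpreting the operands as VTYPE vectors and letting the
   generic MIN_EXPR/MAX_EXPR folder do the work.  */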
6809 static tree
6810 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6811 {
6812 tree op0 = fold_convert (vtype, op[0]);
6813 tree op1 = fold_convert (vtype, op[1]);
6814 tree val = fold (build2 (code, vtype, op0, op1));
6815 return fold_convert (long_integer_type_node, val);
6816 }
6817
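/* Fold the builtin for the PERR instruction, the sum of the absolute
   differences of the eight byte pairs, when both operands are
   constant.  */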
6818 static tree
6819 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6820 {
6821 unsigned HOST_WIDE_INT temp = 0;
6822 int i;
6823
6824 if (op_const != 3)
6825 return NULL;
6826
6827 for (i = 0; i < 8; ++i)
6828 {
6829 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6830 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6831 if (a >= b)
6832 temp += a - b;
6833 else
6834 temp += b - a;
6835 }
6836
6837 return build_int_cst (long_integer_type_node, temp);
6838 }
6839
6840 static tree
6841 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6842 {
6843 unsigned HOST_WIDE_INT temp;
6844
6845 if (op_const == 0)
6846 return NULL;
6847
6848 temp = opint[0] & 0xff;
6849 temp |= (opint[0] >> 24) & 0xff00;
6850
6851 return build_int_cst (long_integer_type_node, temp);
6852 }
6853
6854 static tree
6855 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6856 {
6857 unsigned HOST_WIDE_INT temp;
6858
6859 if (op_const == 0)
6860 return NULL;
6861
6862 temp = opint[0] & 0xff;
6863 temp |= (opint[0] >> 8) & 0xff00;
6864 temp |= (opint[0] >> 16) & 0xff0000;
6865 temp |= (opint[0] >> 24) & 0xff000000;
6866
6867 return build_int_cst (long_integer_type_node, temp);
6868 }
6869
6870 static tree
6871 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6872 {
6873 unsigned HOST_WIDE_INT temp;
6874
6875 if (op_const == 0)
6876 return NULL;
6877
6878 temp = opint[0] & 0xff;
6879 temp |= (opint[0] & 0xff00) << 24;
6880
6881 return build_int_cst (long_integer_type_node, temp);
6882 }
6883
6884 static tree
6885 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6886 {
6887 unsigned HOST_WIDE_INT temp;
6888
6889 if (op_const == 0)
6890 return NULL;
6891
6892 temp = opint[0] & 0xff;
6893 temp |= (opint[0] & 0x0000ff00) << 8;
6894 temp |= (opint[0] & 0x00ff0000) << 16;
6895 temp |= (opint[0] & 0xff000000) << 24;
6896
6897 return build_int_cst (long_integer_type_node, temp);
6898 }
6899
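/* Fold the CIX bit-count builtins when the operand is constant.  A
   zero operand yields 64 for both cttz and ctlz; e.g.
   __builtin_alpha_cttz (0x1000) folds to 12 and
   __builtin_alpha_ctpop (0xff) to 8.  */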
6900 static tree
6901 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6902 {
6903 unsigned HOST_WIDE_INT temp;
6904
6905 if (op_const == 0)
6906 return NULL;
6907
6908 if (opint[0] == 0)
6909 temp = 64;
6910 else
6911 temp = exact_log2 (opint[0] & -opint[0]);
6912
6913 return build_int_cst (long_integer_type_node, temp);
6914 }
6915
6916 static tree
6917 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6918 {
6919 unsigned HOST_WIDE_INT temp;
6920
6921 if (op_const == 0)
6922 return NULL;
6923
6924 if (opint[0] == 0)
6925 temp = 64;
6926 else
6927 temp = 64 - floor_log2 (opint[0]) - 1;
6928
6929 return build_int_cst (long_integer_type_node, temp);
6930 }
6931
6932 static tree
6933 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6934 {
6935 unsigned HOST_WIDE_INT temp, op;
6936
6937 if (op_const == 0)
6938 return NULL;
6939
6940 op = opint[0];
6941 temp = 0;
6942 while (op)
6943 temp++, op &= op - 1;
6944
6945 return build_int_cst (long_integer_type_node, temp);
6946 }
6947
6948 /* Fold one of our builtin functions. */
6949
6950 static tree
6951 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6952 {
6953 tree op[MAX_ARGS], t;
6954 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6955 long op_const = 0, arity = 0;
6956
6957 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6958 {
6959 tree arg = TREE_VALUE (t);
6960 if (arg == error_mark_node)
6961 return NULL;
6962 if (arity >= MAX_ARGS)
6963 return NULL;
6964
6965 op[arity] = arg;
6966 opint[arity] = 0;
6967 if (TREE_CODE (arg) == INTEGER_CST)
6968 {
6969 op_const |= 1L << arity;
6970 opint[arity] = int_cst_value (arg);
6971 }
6972 }
6973
6974 switch (DECL_FUNCTION_CODE (fndecl))
6975 {
6976 case ALPHA_BUILTIN_CMPBGE:
6977 return alpha_fold_builtin_cmpbge (opint, op_const);
6978
6979 case ALPHA_BUILTIN_EXTBL:
6980 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6981 case ALPHA_BUILTIN_EXTWL:
6982 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6983 case ALPHA_BUILTIN_EXTLL:
6984 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6985 case ALPHA_BUILTIN_EXTQL:
6986 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6987 case ALPHA_BUILTIN_EXTWH:
6988 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6989 case ALPHA_BUILTIN_EXTLH:
6990 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6991 case ALPHA_BUILTIN_EXTQH:
6992 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6993
6994 case ALPHA_BUILTIN_INSBL:
6995 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6996 case ALPHA_BUILTIN_INSWL:
6997 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6998 case ALPHA_BUILTIN_INSLL:
6999 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7000 case ALPHA_BUILTIN_INSQL:
7001 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7002 case ALPHA_BUILTIN_INSWH:
7003 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7004 case ALPHA_BUILTIN_INSLH:
7005 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7006 case ALPHA_BUILTIN_INSQH:
7007 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7008
7009 case ALPHA_BUILTIN_MSKBL:
7010 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7011 case ALPHA_BUILTIN_MSKWL:
7012 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7013 case ALPHA_BUILTIN_MSKLL:
7014 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7015 case ALPHA_BUILTIN_MSKQL:
7016 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7017 case ALPHA_BUILTIN_MSKWH:
7018 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7019 case ALPHA_BUILTIN_MSKLH:
7020 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7021 case ALPHA_BUILTIN_MSKQH:
7022 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7023
7024 case ALPHA_BUILTIN_UMULH:
7025 return alpha_fold_builtin_umulh (opint, op_const);
7026
7027 case ALPHA_BUILTIN_ZAP:
7028 opint[1] ^= 0xff;
7029 /* FALLTHRU */
7030 case ALPHA_BUILTIN_ZAPNOT:
7031 return alpha_fold_builtin_zapnot (op, opint, op_const);
7032
7033 case ALPHA_BUILTIN_MINUB8:
7034 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7035 case ALPHA_BUILTIN_MINSB8:
7036 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7037 case ALPHA_BUILTIN_MINUW4:
7038 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7039 case ALPHA_BUILTIN_MINSW4:
7040 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7041 case ALPHA_BUILTIN_MAXUB8:
7042 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7043 case ALPHA_BUILTIN_MAXSB8:
7044 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7045 case ALPHA_BUILTIN_MAXUW4:
7046 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7047 case ALPHA_BUILTIN_MAXSW4:
7048 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7049
7050 case ALPHA_BUILTIN_PERR:
7051 return alpha_fold_builtin_perr (opint, op_const);
7052 case ALPHA_BUILTIN_PKLB:
7053 return alpha_fold_builtin_pklb (opint, op_const);
7054 case ALPHA_BUILTIN_PKWB:
7055 return alpha_fold_builtin_pkwb (opint, op_const);
7056 case ALPHA_BUILTIN_UNPKBL:
7057 return alpha_fold_builtin_unpkbl (opint, op_const);
7058 case ALPHA_BUILTIN_UNPKBW:
7059 return alpha_fold_builtin_unpkbw (opint, op_const);
7060
7061 case ALPHA_BUILTIN_CTTZ:
7062 return alpha_fold_builtin_cttz (opint, op_const);
7063 case ALPHA_BUILTIN_CTLZ:
7064 return alpha_fold_builtin_ctlz (opint, op_const);
7065 case ALPHA_BUILTIN_CTPOP:
7066 return alpha_fold_builtin_ctpop (opint, op_const);
7067
7068 case ALPHA_BUILTIN_AMASK:
7069 case ALPHA_BUILTIN_IMPLVER:
7070 case ALPHA_BUILTIN_RPCC:
7071 case ALPHA_BUILTIN_THREAD_POINTER:
7072 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7073 /* None of these are foldable at compile-time. */
7074 default:
7075 return NULL;
7076 }
7077 }
7078 \f
7079 /* This page contains routines that are used to determine what the function
7080 prologue and epilogue code will do and write them out. */
7081
7082 /* Compute the size of the save area in the stack. */
7083
7084 /* These variables are used for communication between the following functions.
7085 They indicate various things about the current function being compiled
7086 that are used to tell what kind of prologue, epilogue and procedure
7087 descriptor to generate. */
7088
7089 /* Nonzero if we need a stack procedure. */
7090 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7091 static enum alpha_procedure_types alpha_procedure_type;
7092
7093 /* Register number (either FP or SP) that is used to unwind the frame. */
7094 static int vms_unwind_regno;
7095
7096 /* Register number used to save FP. We need not have one for RA since
7097 we don't modify it for register procedures. This is only defined
7098 for register frame procedures. */
7099 static int vms_save_fp_regno;
7100
7101 /* Register number used to reference objects off our PV. */
7102 static int vms_base_regno;
7103
7104 /* Compute register masks for saved registers. */
7105
7106 static void
7107 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7108 {
7109 unsigned long imask = 0;
7110 unsigned long fmask = 0;
7111 unsigned int i;
7112
7113 /* When outputting a thunk, we don't have valid register life info,
7114 but assemble_start_function wants to output .frame and .mask
7115 directives. */
7116 if (current_function_is_thunk)
7117 {
7118 *imaskP = 0;
7119 *fmaskP = 0;
7120 return;
7121 }
7122
7123 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7124 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7125
7126 /* One for every register we have to save. */
7127 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7128 if (! fixed_regs[i] && ! call_used_regs[i]
7129 && regs_ever_live[i] && i != REG_RA
7130 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7131 {
7132 if (i < 32)
7133 imask |= (1UL << i);
7134 else
7135 fmask |= (1UL << (i - 32));
7136 }
7137
7138 /* We need to restore these for the handler. */
7139 if (current_function_calls_eh_return)
7140 {
7141 for (i = 0; ; ++i)
7142 {
7143 unsigned regno = EH_RETURN_DATA_REGNO (i);
7144 if (regno == INVALID_REGNUM)
7145 break;
7146 imask |= 1UL << regno;
7147 }
7148 }
7149
7150 /* If any register spilled, then spill the return address also. */
7151 /* ??? This is required by the Digital stack unwind specification
7152 and isn't needed if we're doing Dwarf2 unwinding. */
7153 if (imask || fmask || alpha_ra_ever_killed ())
7154 imask |= (1UL << REG_RA);
7155
7156 *imaskP = imask;
7157 *fmaskP = fmask;
7158 }
7159
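/* Return the size in bytes of the register save area computed from
   the masks above.  On VMS and Unicos/Mk this also settles
   alpha_procedure_type (and, for VMS, the base and unwind registers)
   as a side effect.  */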
7160 int
7161 alpha_sa_size (void)
7162 {
7163 unsigned long mask[2];
7164 int sa_size = 0;
7165 int i, j;
7166
7167 alpha_sa_mask (&mask[0], &mask[1]);
7168
7169 if (TARGET_ABI_UNICOSMK)
7170 {
7171 if (mask[0] || mask[1])
7172 sa_size = 14;
7173 }
7174 else
7175 {
7176 for (j = 0; j < 2; ++j)
7177 for (i = 0; i < 32; ++i)
7178 if ((mask[j] >> i) & 1)
7179 sa_size++;
7180 }
7181
7182 if (TARGET_ABI_UNICOSMK)
7183 {
7184 /* We might not need to generate a frame if we don't make any calls
7185 (including calls to __T3E_MISMATCH if this is a vararg function),
7186 don't have any local variables which require stack slots, don't
7187 use alloca and have not determined that we need a frame for other
7188 reasons. */
7189
7190 alpha_procedure_type
7191 = (sa_size || get_frame_size() != 0
7192 || current_function_outgoing_args_size
7193 || current_function_stdarg || current_function_calls_alloca
7194 || frame_pointer_needed)
7195 ? PT_STACK : PT_REGISTER;
7196
7197 /* Always reserve space for saving callee-saved registers if we
7198 need a frame as required by the calling convention. */
7199 if (alpha_procedure_type == PT_STACK)
7200 sa_size = 14;
7201 }
7202 else if (TARGET_ABI_OPEN_VMS)
7203 {
7204 /* Start by assuming we can use a register procedure if we don't
7205 make any calls (REG_RA not used) or need to save any
7206 registers, and a stack procedure if we do. */
7207 if ((mask[0] >> REG_RA) & 1)
7208 alpha_procedure_type = PT_STACK;
7209 else if (get_frame_size() != 0)
7210 alpha_procedure_type = PT_REGISTER;
7211 else
7212 alpha_procedure_type = PT_NULL;
7213
7214 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7215 made the final decision on stack procedure vs register procedure. */
7216 if (alpha_procedure_type == PT_STACK)
7217 sa_size -= 2;
7218
7219 /* Decide whether to refer to objects off our PV via FP or PV.
7220 If we need FP for something else or if we receive a nonlocal
7221 goto (which expects PV to contain the value), we must use PV.
7222 Otherwise, start by assuming we can use FP. */
7223
7224 vms_base_regno
7225 = (frame_pointer_needed
7226 || current_function_has_nonlocal_label
7227 || alpha_procedure_type == PT_STACK
7228 || current_function_outgoing_args_size)
7229 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7230
7231 /* If we want to copy PV into FP, we need to find some register
7232 in which to save FP. */
7233
7234 vms_save_fp_regno = -1;
7235 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7236 for (i = 0; i < 32; i++)
7237 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7238 vms_save_fp_regno = i;
7239
7240 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7241 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7242 else if (alpha_procedure_type == PT_NULL)
7243 vms_base_regno = REG_PV;
7244
7245 /* Stack unwinding should be done via FP unless we use it for PV. */
7246 vms_unwind_regno = (vms_base_regno == REG_PV
7247 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7248
7249 /* If this is a stack procedure, allow space for saving FP and RA. */
7250 if (alpha_procedure_type == PT_STACK)
7251 sa_size += 2;
7252 }
7253 else
7254 {
7255 /* Our size must be even (multiple of 16 bytes). */
7256 if (sa_size & 1)
7257 sa_size++;
7258 }
7259
7260 return sa_size * 8;
7261 }
7262
7263 /* Define the offset between two registers, one to be eliminated,
7264 and the other its replacement, at the start of a routine. */
7265
7266 HOST_WIDE_INT
7267 alpha_initial_elimination_offset (unsigned int from,
7268 unsigned int to ATTRIBUTE_UNUSED)
7269 {
7270 HOST_WIDE_INT ret;
7271
7272 ret = alpha_sa_size ();
7273 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7274
7275 switch (from)
7276 {
7277 case FRAME_POINTER_REGNUM:
7278 break;
7279
7280 case ARG_POINTER_REGNUM:
7281 ret += (ALPHA_ROUND (get_frame_size ()
7282 + current_function_pretend_args_size)
7283 - current_function_pretend_args_size);
7284 break;
7285
7286 default:
7287 gcc_unreachable ();
7288 }
7289
7290 return ret;
7291 }
7292
7293 int
7294 alpha_pv_save_size (void)
7295 {
7296 alpha_sa_size ();
7297 return alpha_procedure_type == PT_STACK ? 8 : 0;
7298 }
7299
7300 int
7301 alpha_using_fp (void)
7302 {
7303 alpha_sa_size ();
7304 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7305 }
7306
7307 #if TARGET_ABI_OPEN_VMS
7308
7309 const struct attribute_spec vms_attribute_table[] =
7310 {
7311 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7312 { "overlaid", 0, 0, true, false, false, NULL },
7313 { "global", 0, 0, true, false, false, NULL },
7314 { "initialize", 0, 0, true, false, false, NULL },
7315 { NULL, 0, 0, false, false, false, NULL }
7316 };
7317
7318 #endif
7319
7320 static int
7321 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7322 {
7323 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7324 }
7325
7326 int
7327 alpha_find_lo_sum_using_gp (rtx insn)
7328 {
7329 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7330 }
7331
7332 static int
7333 alpha_does_function_need_gp (void)
7334 {
7335 rtx insn;
7336
7337 /* The GP being variable is an OSF abi thing. */
7338 if (! TARGET_ABI_OSF)
7339 return 0;
7340
7341 /* We need the gp to load the address of __mcount. */
7342 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7343 return 1;
7344
7345 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7346 if (current_function_is_thunk)
7347 return 1;
7348
7349 /* The nonlocal receiver pattern assumes that the gp is valid for
7350 the nested function. Reasonable because it's almost always set
7351 correctly already. For the cases where that's wrong, make sure
7352 the nested function loads its gp on entry. */
7353 if (current_function_has_nonlocal_goto)
7354 return 1;
7355
7356 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7357 Even if we are a static function, we still need to do this in case
7358 our address is taken and passed to something like qsort. */
7359
7360 push_topmost_sequence ();
7361 insn = get_insns ();
7362 pop_topmost_sequence ();
7363
7364 for (; insn; insn = NEXT_INSN (insn))
7365 if (INSN_P (insn)
7366 && GET_CODE (PATTERN (insn)) != USE
7367 && GET_CODE (PATTERN (insn)) != CLOBBER
7368 && get_attr_usegp (insn))
7369 return 1;
7370
7371 return 0;
7372 }
7373
7374 \f
7375 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7376 sequences. */
7377
7378 static rtx
7379 set_frame_related_p (void)
7380 {
7381 rtx seq = get_insns ();
7382 rtx insn;
7383
7384 end_sequence ();
7385
7386 if (!seq)
7387 return NULL_RTX;
7388
7389 if (INSN_P (seq))
7390 {
7391 insn = seq;
7392 while (insn != NULL_RTX)
7393 {
7394 RTX_FRAME_RELATED_P (insn) = 1;
7395 insn = NEXT_INSN (insn);
7396 }
7397 seq = emit_insn (seq);
7398 }
7399 else
7400 {
7401 seq = emit_insn (seq);
7402 RTX_FRAME_RELATED_P (seq) = 1;
7403 }
7404 return seq;
7405 }
7406
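/* FRP: emit EXP inside its own sequence and mark everything it
   generated as frame related (see set_frame_related_p above).  */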
7407 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7408
7409 /* Generates a store with the proper unwind info attached. VALUE is
7410 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7411 contains SP+FRAME_BIAS, and that is the unwind info that should be
7412 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7413 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7414
7415 static void
7416 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7417 HOST_WIDE_INT base_ofs, rtx frame_reg)
7418 {
7419 rtx addr, mem, insn;
7420
7421 addr = plus_constant (base_reg, base_ofs);
7422 mem = gen_rtx_MEM (DImode, addr);
7423 set_mem_alias_set (mem, alpha_sr_alias_set);
7424
7425 insn = emit_move_insn (mem, value);
7426 RTX_FRAME_RELATED_P (insn) = 1;
7427
7428 if (frame_bias || value != frame_reg)
7429 {
7430 if (frame_bias)
7431 {
7432 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7433 mem = gen_rtx_MEM (DImode, addr);
7434 }
7435
7436 REG_NOTES (insn)
7437 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7438 gen_rtx_SET (VOIDmode, mem, frame_reg),
7439 REG_NOTES (insn));
7440 }
7441 }
7442
7443 static void
7444 emit_frame_store (unsigned int regno, rtx base_reg,
7445 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7446 {
7447 rtx reg = gen_rtx_REG (DImode, regno);
7448 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7449 }
7450
7451 /* Write function prologue. */
7452
7453 /* On vms we have two kinds of functions:
7454
7455 - stack frame (PROC_STACK)
7456 these are 'normal' functions with local vars and which are
7457 calling other functions
7458 - register frame (PROC_REGISTER)
7459 keeps all data in registers, needs no stack
7460
7461 We must pass this to the assembler so it can generate the
7462 proper pdsc (procedure descriptor)
7463 This is done with the '.pdesc' command.
7464
7465 On not-vms, we don't really differentiate between the two, as we can
7466 simply allocate stack without saving registers. */
7467
7468 void
7469 alpha_expand_prologue (void)
7470 {
7471 /* Registers to save. */
7472 unsigned long imask = 0;
7473 unsigned long fmask = 0;
7474 /* Stack space needed for pushing registers clobbered by us. */
7475 HOST_WIDE_INT sa_size;
7476 /* Complete stack size needed. */
7477 HOST_WIDE_INT frame_size;
7478 /* Offset from base reg to register save area. */
7479 HOST_WIDE_INT reg_offset;
7480 rtx sa_reg;
7481 int i;
7482
7483 sa_size = alpha_sa_size ();
7484
7485 frame_size = get_frame_size ();
7486 if (TARGET_ABI_OPEN_VMS)
7487 frame_size = ALPHA_ROUND (sa_size
7488 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7489 + frame_size
7490 + current_function_pretend_args_size);
7491 else if (TARGET_ABI_UNICOSMK)
7492 /* We have to allocate space for the DSIB if we generate a frame. */
7493 frame_size = ALPHA_ROUND (sa_size
7494 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7495 + ALPHA_ROUND (frame_size
7496 + current_function_outgoing_args_size);
7497 else
7498 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7499 + sa_size
7500 + ALPHA_ROUND (frame_size
7501 + current_function_pretend_args_size));
7502
7503 if (TARGET_ABI_OPEN_VMS)
7504 reg_offset = 8;
7505 else
7506 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7507
7508 alpha_sa_mask (&imask, &fmask);
7509
7510 /* Emit an insn to reload GP, if needed. */
7511 if (TARGET_ABI_OSF)
7512 {
7513 alpha_function_needs_gp = alpha_does_function_need_gp ();
7514 if (alpha_function_needs_gp)
7515 emit_insn (gen_prologue_ldgp ());
7516 }
7517
7518 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7519 the call to mcount ourselves, rather than having the linker do it
7520 magically in response to -pg. Since _mcount has special linkage,
7521 don't represent the call as a call. */
7522 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7523 emit_insn (gen_prologue_mcount ());
7524
7525 if (TARGET_ABI_UNICOSMK)
7526 unicosmk_gen_dsib (&imask);
7527
7528 /* Adjust the stack by the frame size. If the frame size is > 4096
7529 bytes, we need to be sure we probe somewhere in the first and last
7530 4096 bytes (we can probably get away without the latter test) and
7531 every 8192 bytes in between. If the frame size is > 32768, we
7532 do this in a loop. Otherwise, we generate the explicit probe
7533 instructions.
7534
7535 Note that we are only allowed to adjust sp once in the prologue. */
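  /* For example, a 20K frame falls in the first case: it gets explicit
     probes at sp-4096 and sp-12288 (plus one more probe only if no
     registers are being saved and more than 4096 bytes would otherwise
     go unprobed), followed by the single sp adjustment.  */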
7536
7537 if (frame_size <= 32768)
7538 {
7539 if (frame_size > 4096)
7540 {
7541 int probed = 4096;
7542
7543 do
7544 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7545 ? -probed + 64
7546 : -probed)));
7547 while ((probed += 8192) < frame_size);
7548
7549 /* We only have to do this probe if we aren't saving registers. */
7550 if (sa_size == 0 && probed + 4096 < frame_size)
7551 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7552 }
7553
7554 if (frame_size != 0)
7555 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7556 GEN_INT (TARGET_ABI_UNICOSMK
7557 ? -frame_size + 64
7558 : -frame_size))));
7559 }
7560 else
7561 {
7562 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7563 number of 8192 byte blocks to probe. We then probe each block
7564 in the loop and then set SP to the proper location. If the
7565 amount remaining is > 4096, we have to do one more probe if we
7566 are not saving any registers. */
7567
7568 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7569 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7570 rtx ptr = gen_rtx_REG (DImode, 22);
7571 rtx count = gen_rtx_REG (DImode, 23);
7572 rtx seq;
7573
7574 emit_move_insn (count, GEN_INT (blocks));
7575 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7576 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7577
7578 /* Because of the difficulty in emitting a new basic block this
7579 late in the compilation, generate the loop as a single insn. */
7580 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7581
7582 if (leftover > 4096 && sa_size == 0)
7583 {
7584 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7585 MEM_VOLATILE_P (last) = 1;
7586 emit_move_insn (last, const0_rtx);
7587 }
7588
7589 if (TARGET_ABI_WINDOWS_NT)
7590 {
7591 /* For NT stack unwind (done by 'reverse execution'), it's
7592 not OK to take the result of a loop, even though the value
7593 is already in ptr, so we reload it via a single operation
7594 and subtract it from sp.
7595
7596 Yes, that's correct -- we have to reload the whole constant
7597 into a temporary via ldah+lda then subtract from sp. */
7598
7599 HOST_WIDE_INT lo, hi;
7600 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7601 hi = frame_size - lo;
7602
7603 emit_move_insn (ptr, GEN_INT (hi));
7604 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7605 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7606 ptr));
7607 }
7608 else
7609 {
7610 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7611 GEN_INT (-leftover)));
7612 }
7613
7614 /* This alternative is special, because the DWARF code cannot
7615 possibly intuit through the loop above. So we invent this
7616 note it looks at instead. */
7617 RTX_FRAME_RELATED_P (seq) = 1;
7618 REG_NOTES (seq)
7619 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7620 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7621 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7622 GEN_INT (TARGET_ABI_UNICOSMK
7623 ? -frame_size + 64
7624 : -frame_size))),
7625 REG_NOTES (seq));
7626 }
7627
7628 if (!TARGET_ABI_UNICOSMK)
7629 {
7630 HOST_WIDE_INT sa_bias = 0;
7631
7632 /* Cope with very large offsets to the register save area. */
7633 sa_reg = stack_pointer_rtx;
7634 if (reg_offset + sa_size > 0x8000)
7635 {
7636 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7637 rtx sa_bias_rtx;
7638
7639 if (low + sa_size <= 0x8000)
7640 sa_bias = reg_offset - low, reg_offset = low;
7641 else
7642 sa_bias = reg_offset, reg_offset = 0;
7643
7644 sa_reg = gen_rtx_REG (DImode, 24);
7645 sa_bias_rtx = GEN_INT (sa_bias);
7646
7647 if (add_operand (sa_bias_rtx, DImode))
7648 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7649 else
7650 {
7651 emit_move_insn (sa_reg, sa_bias_rtx);
7652 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7653 }
7654 }
7655
7656 /* Save regs in stack order. Beginning with VMS PV. */
7657 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7658 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7659
7660 /* Save register RA next. */
7661 if (imask & (1UL << REG_RA))
7662 {
7663 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7664 imask &= ~(1UL << REG_RA);
7665 reg_offset += 8;
7666 }
7667
7668 /* Now save any other registers required to be saved. */
7669 for (i = 0; i < 31; i++)
7670 if (imask & (1UL << i))
7671 {
7672 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7673 reg_offset += 8;
7674 }
7675
7676 for (i = 0; i < 31; i++)
7677 if (fmask & (1UL << i))
7678 {
7679 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7680 reg_offset += 8;
7681 }
7682 }
7683 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7684 {
7685 /* The standard frame on the T3E includes space for saving registers.
7686 We just have to use it. We don't have to save the return address and
7687 the old frame pointer here - they are saved in the DSIB. */
7688
7689 reg_offset = -56;
7690 for (i = 9; i < 15; i++)
7691 if (imask & (1UL << i))
7692 {
7693 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7694 reg_offset -= 8;
7695 }
7696 for (i = 2; i < 10; i++)
7697 if (fmask & (1UL << i))
7698 {
7699 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7700 reg_offset -= 8;
7701 }
7702 }
7703
7704 if (TARGET_ABI_OPEN_VMS)
7705 {
7706 if (alpha_procedure_type == PT_REGISTER)
7707 /* Register frame procedures save the fp.
7708 ?? Ought to have a dwarf2 save for this. */
7709 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7710 hard_frame_pointer_rtx);
7711
7712 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7713 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7714 gen_rtx_REG (DImode, REG_PV)));
7715
7716 if (alpha_procedure_type != PT_NULL
7717 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7718 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7719
7720 /* If we have to allocate space for outgoing args, do it now. */
7721 if (current_function_outgoing_args_size != 0)
7722 {
7723 rtx seq
7724 = emit_move_insn (stack_pointer_rtx,
7725 plus_constant
7726 (hard_frame_pointer_rtx,
7727 - (ALPHA_ROUND
7728 (current_function_outgoing_args_size))));
7729
7730 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7731 if ! frame_pointer_needed. Setting the bit will change the CFA
7732 computation rule to use sp again, which would be wrong if we had
7733 frame_pointer_needed, as this means sp might move unpredictably
7734 later on.
7735
7736 Also, note that
7737 frame_pointer_needed
7738 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7739 and
7740 current_function_outgoing_args_size != 0
7741 => alpha_procedure_type != PT_NULL,
7742
7743 so when we are not setting the bit here, we are guaranteed to
7744 have emitted an FRP frame pointer update just before. */
7745 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7746 }
7747 }
7748 else if (!TARGET_ABI_UNICOSMK)
7749 {
7750 /* If we need a frame pointer, set it from the stack pointer. */
7751 if (frame_pointer_needed)
7752 {
7753 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7754 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7755 else
7756 /* This must always be the last instruction in the
7757 prologue, thus we emit a special move + clobber. */
7758 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7759 stack_pointer_rtx, sa_reg)));
7760 }
7761 }
7762
7763 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7764 the prologue, for exception handling reasons, we cannot do this for
7765 any insn that might fault. We could prevent this for mems with a
7766 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7767 have to prevent all such scheduling with a blockage.
7768
7769 Linux, on the other hand, never bothered to implement OSF/1's
7770 exception handling, and so doesn't care about such things. Anyone
7771 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7772
7773 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7774 emit_insn (gen_blockage ());
7775 }
7776
7777 /* Count the number of .file directives, so that .loc is up to date. */
7778 int num_source_filenames = 0;
7779
7780 /* Output the textual info surrounding the prologue. */
7781
7782 void
7783 alpha_start_function (FILE *file, const char *fnname,
7784 tree decl ATTRIBUTE_UNUSED)
7785 {
7786 unsigned long imask = 0;
7787 unsigned long fmask = 0;
7788 /* Stack space needed for pushing registers clobbered by us. */
7789 HOST_WIDE_INT sa_size;
7790 /* Complete stack size needed. */
7791 unsigned HOST_WIDE_INT frame_size;
7792 /* Offset from base reg to register save area. */
7793 HOST_WIDE_INT reg_offset;
7794 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7795 int i;
7796
7797 /* Don't emit an extern directive for functions defined in the same file. */
7798 if (TARGET_ABI_UNICOSMK)
7799 {
7800 tree name_tree;
7801 name_tree = get_identifier (fnname);
7802 TREE_ASM_WRITTEN (name_tree) = 1;
7803 }
7804
7805 alpha_fnname = fnname;
7806 sa_size = alpha_sa_size ();
7807
7808 frame_size = get_frame_size ();
7809 if (TARGET_ABI_OPEN_VMS)
7810 frame_size = ALPHA_ROUND (sa_size
7811 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7812 + frame_size
7813 + current_function_pretend_args_size);
7814 else if (TARGET_ABI_UNICOSMK)
7815 frame_size = ALPHA_ROUND (sa_size
7816 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7817 + ALPHA_ROUND (frame_size
7818 + current_function_outgoing_args_size);
7819 else
7820 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7821 + sa_size
7822 + ALPHA_ROUND (frame_size
7823 + current_function_pretend_args_size));
7824
7825 if (TARGET_ABI_OPEN_VMS)
7826 reg_offset = 8;
7827 else
7828 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7829
7830 alpha_sa_mask (&imask, &fmask);
7831
7832 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7833 We have to do that before the .ent directive as we cannot switch
7834 files within procedures with native ecoff because line numbers are
7835 linked to procedure descriptors.
7836 Outputting the lineno helps debugging of one-line functions as they
7837 would otherwise get no line number at all. Please note that we would
7838 like to put out last_linenum from final.c, but it is not accessible. */
7839
7840 if (write_symbols == SDB_DEBUG)
7841 {
7842 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7843 ASM_OUTPUT_SOURCE_FILENAME (file,
7844 DECL_SOURCE_FILE (current_function_decl));
7845 #endif
7846 #ifdef SDB_OUTPUT_SOURCE_LINE
7847 if (debug_info_level != DINFO_LEVEL_TERSE)
7848 SDB_OUTPUT_SOURCE_LINE (file,
7849 DECL_SOURCE_LINE (current_function_decl));
7850 #endif
7851 }
7852
7853 /* Issue function start and label. */
7854 if (TARGET_ABI_OPEN_VMS
7855 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7856 {
7857 fputs ("\t.ent ", file);
7858 assemble_name (file, fnname);
7859 putc ('\n', file);
7860
7861 /* If the function needs GP, we'll write the "..ng" label there.
7862 Otherwise, do it here. */
7863 if (TARGET_ABI_OSF
7864 && ! alpha_function_needs_gp
7865 && ! current_function_is_thunk)
7866 {
7867 putc ('$', file);
7868 assemble_name (file, fnname);
7869 fputs ("..ng:\n", file);
7870 }
7871 }
7872
7873 strcpy (entry_label, fnname);
7874 if (TARGET_ABI_OPEN_VMS)
7875 strcat (entry_label, "..en");
7876
7877 /* For public functions, the label must be globalized by appending an
7878 additional colon. */
7879 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7880 strcat (entry_label, ":");
7881
7882 ASM_OUTPUT_LABEL (file, entry_label);
7883 inside_function = TRUE;
7884
7885 if (TARGET_ABI_OPEN_VMS)
7886 fprintf (file, "\t.base $%d\n", vms_base_regno);
7887
7888 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7889 && !flag_inhibit_size_directive)
7890 {
7891 /* Set flags in procedure descriptor to request IEEE-conformant
7892 math-library routines. The value we set it to is PDSC_EXC_IEEE
7893 (/usr/include/pdsc.h). */
7894 fputs ("\t.eflag 48\n", file);
7895 }
7896
7897 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7898 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7899 alpha_arg_offset = -frame_size + 48;
7900
7901 /* Describe our frame. If the frame size is larger than an integer,
7902 print it as zero to avoid an assembler error. We won't be
7903 properly describing such a frame, but that's the best we can do. */
7904 if (TARGET_ABI_UNICOSMK)
7905 ;
7906 else if (TARGET_ABI_OPEN_VMS)
7907 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7908 HOST_WIDE_INT_PRINT_DEC "\n",
7909 vms_unwind_regno,
7910 frame_size >= (1UL << 31) ? 0 : frame_size,
7911 reg_offset);
7912 else if (!flag_inhibit_size_directive)
7913 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7914 (frame_pointer_needed
7915 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7916 frame_size >= (1UL << 31) ? 0 : frame_size,
7917 current_function_pretend_args_size);
7918
7919 /* Describe which registers were spilled. */
7920 if (TARGET_ABI_UNICOSMK)
7921 ;
7922 else if (TARGET_ABI_OPEN_VMS)
7923 {
7924 if (imask)
7925 /* ??? Does VMS care if the mask contains ra? The old code didn't
7926 set it, so we don't set it here either. */
7927 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7928 if (fmask)
7929 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7930 if (alpha_procedure_type == PT_REGISTER)
7931 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7932 }
7933 else if (!flag_inhibit_size_directive)
7934 {
7935 if (imask)
7936 {
7937 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7938 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7939
7940 for (i = 0; i < 32; ++i)
7941 if (imask & (1UL << i))
7942 reg_offset += 8;
7943 }
7944
7945 if (fmask)
7946 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7947 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7948 }
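/* As a purely hypothetical illustration of the OSF directives above: a
   function that saves only $9 and $26 in a 96-byte sp-based frame, with
   no outgoing args and no pretend args, would be described roughly as

	.frame $30,96,$26,0
	.mask 0x4000200,-96

   where 0x4000200 is (1 << 26) | (1 << 9) and -96 is reg_offset minus
   frame_size.  The exact numbers of course depend on the function.  */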
7949
7950 #if TARGET_ABI_OPEN_VMS
7951 /* Ifdef'ed because link_section is only available then. */
7952 readonly_data_section ();
7953 fprintf (file, "\t.align 3\n");
7954 assemble_name (file, fnname); fputs ("..na:\n", file);
7955 fputs ("\t.ascii \"", file);
7956 assemble_name (file, fnname);
7957 fputs ("\\0\"\n", file);
7958 alpha_need_linkage (fnname, 1);
7959 text_section ();
7960 #endif
7961 }
7962
7963 /* Emit the .prologue note at the scheduled end of the prologue. */
7964
7965 static void
7966 alpha_output_function_end_prologue (FILE *file)
7967 {
7968 if (TARGET_ABI_UNICOSMK)
7969 ;
7970 else if (TARGET_ABI_OPEN_VMS)
7971 fputs ("\t.prologue\n", file);
7972 else if (TARGET_ABI_WINDOWS_NT)
7973 fputs ("\t.prologue 0\n", file);
7974 else if (!flag_inhibit_size_directive)
7975 fprintf (file, "\t.prologue %d\n",
7976 alpha_function_needs_gp || current_function_is_thunk);
7977 }
7978
7979 /* Write function epilogue. */
7980
7981 /* ??? At some point we will want to support full unwind, and so will
7982 need to mark the epilogue as well. At the moment, we just confuse
7983 dwarf2out. */
7984 #undef FRP
7985 #define FRP(exp) exp
7986
7987 void
7988 alpha_expand_epilogue (void)
7989 {
7990 /* Registers to save. */
7991 unsigned long imask = 0;
7992 unsigned long fmask = 0;
7993 /* Stack space needed for pushing registers clobbered by us. */
7994 HOST_WIDE_INT sa_size;
7995 /* Complete stack size needed. */
7996 HOST_WIDE_INT frame_size;
7997 /* Offset from base reg to register save area. */
7998 HOST_WIDE_INT reg_offset;
7999 int fp_is_frame_pointer, fp_offset;
8000 rtx sa_reg, sa_reg_exp = NULL;
8001 rtx sp_adj1, sp_adj2, mem;
8002 rtx eh_ofs;
8003 int i;
8004
8005 sa_size = alpha_sa_size ();
8006
8007 frame_size = get_frame_size ();
8008 if (TARGET_ABI_OPEN_VMS)
8009 frame_size = ALPHA_ROUND (sa_size
8010 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8011 + frame_size
8012 + current_function_pretend_args_size);
8013 else if (TARGET_ABI_UNICOSMK)
8014 frame_size = ALPHA_ROUND (sa_size
8015 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8016 + ALPHA_ROUND (frame_size
8017 + current_function_outgoing_args_size);
8018 else
8019 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8020 + sa_size
8021 + ALPHA_ROUND (frame_size
8022 + current_function_pretend_args_size));
8023
8024 if (TARGET_ABI_OPEN_VMS)
8025 {
8026 if (alpha_procedure_type == PT_STACK)
8027 reg_offset = 8;
8028 else
8029 reg_offset = 0;
8030 }
8031 else
8032 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8033
8034 alpha_sa_mask (&imask, &fmask);
8035
8036 fp_is_frame_pointer
8037 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8038 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8039 fp_offset = 0;
8040 sa_reg = stack_pointer_rtx;
8041
8042 if (current_function_calls_eh_return)
8043 eh_ofs = EH_RETURN_STACKADJ_RTX;
8044 else
8045 eh_ofs = NULL_RTX;
8046
8047 if (!TARGET_ABI_UNICOSMK && sa_size)
8048 {
8049 /* If we have a frame pointer, restore SP from it. */
8050 if ((TARGET_ABI_OPEN_VMS
8051 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8052 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8053 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8054
8055 /* Cope with very large offsets to the register save area. */
8056 if (reg_offset + sa_size > 0x8000)
8057 {
8058 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8059 HOST_WIDE_INT bias;
8060
8061 if (low + sa_size <= 0x8000)
8062 bias = reg_offset - low, reg_offset = low;
8063 else
8064 bias = reg_offset, reg_offset = 0;
8065
8066 sa_reg = gen_rtx_REG (DImode, 22);
8067 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8068
8069 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8070 }
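/* A worked example of the biasing above, with hypothetical values: if
   reg_offset were 0x12340 and sa_size 0x40, then low would be 0x2340,
   the first branch would be taken (0x2340 + 0x40 <= 0x8000), bias would
   become 0x10000 and reg_offset 0x2340.  Register $22 then holds
   sp + 0x10000, and every restore below uses a displacement that fits
   the signed 16-bit field of the load.  */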
8071
8072 /* Restore registers in order, excepting a true frame pointer. */
8073
8074 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8075 if (! eh_ofs)
8076 set_mem_alias_set (mem, alpha_sr_alias_set);
8077 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8078
8079 reg_offset += 8;
8080 imask &= ~(1UL << REG_RA);
8081
8082 for (i = 0; i < 31; ++i)
8083 if (imask & (1UL << i))
8084 {
8085 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8086 fp_offset = reg_offset;
8087 else
8088 {
8089 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8090 set_mem_alias_set (mem, alpha_sr_alias_set);
8091 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8092 }
8093 reg_offset += 8;
8094 }
8095
8096 for (i = 0; i < 31; ++i)
8097 if (fmask & (1UL << i))
8098 {
8099 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8100 set_mem_alias_set (mem, alpha_sr_alias_set);
8101 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8102 reg_offset += 8;
8103 }
8104 }
8105 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8106 {
8107 /* Restore callee-saved general-purpose registers. */
8108
8109 reg_offset = -56;
8110
8111 for (i = 9; i < 15; i++)
8112 if (imask & (1UL << i))
8113 {
8114 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8115 reg_offset));
8116 set_mem_alias_set (mem, alpha_sr_alias_set);
8117 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8118 reg_offset -= 8;
8119 }
8120
8121 for (i = 2; i < 10; i++)
8122 if (fmask & (1UL << i))
8123 {
8124 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8125 reg_offset));
8126 set_mem_alias_set (mem, alpha_sr_alias_set);
8127 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8128 reg_offset -= 8;
8129 }
8130
8131 /* Restore the return address from the DSIB. */
8132
8133 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8134 set_mem_alias_set (mem, alpha_sr_alias_set);
8135 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8136 }
8137
8138 if (frame_size || eh_ofs)
8139 {
8140 sp_adj1 = stack_pointer_rtx;
8141
8142 if (eh_ofs)
8143 {
8144 sp_adj1 = gen_rtx_REG (DImode, 23);
8145 emit_move_insn (sp_adj1,
8146 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8147 }
8148
8149 /* If the stack size is large, begin computation into a temporary
8150 register so as not to interfere with a potential fp restore,
8151 which must be consecutive with an SP restore. */
8152 if (frame_size < 32768
8153 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8154 sp_adj2 = GEN_INT (frame_size);
8155 else if (TARGET_ABI_UNICOSMK)
8156 {
8157 sp_adj1 = gen_rtx_REG (DImode, 23);
8158 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8159 sp_adj2 = const0_rtx;
8160 }
8161 else if (frame_size < 0x40007fffL)
8162 {
8163 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8164
8165 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8166 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8167 sp_adj1 = sa_reg;
8168 else
8169 {
8170 sp_adj1 = gen_rtx_REG (DImode, 23);
8171 FRP (emit_move_insn (sp_adj1, sp_adj2));
8172 }
8173 sp_adj2 = GEN_INT (low);
8174 }
8175 else
8176 {
8177 rtx tmp = gen_rtx_REG (DImode, 23);
8178 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8179 3, false));
8180 if (!sp_adj2)
8181 {
8182 /* We can't force new constants to memory this late, as far as
8183 we know, so build the value up in pieces. */
8184 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8185 -(frame_size < 0)));
8186 gcc_assert (sp_adj2);
8187 }
8188 }
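/* To illustrate the frame_size < 0x40007fffL branch above with a
   hypothetical value: for a frame_size of 0x12340, low is 0x2340, so the
   bulk of the adjustment, 0x10000, is first added into a temporary (or
   sa_reg is simply reused when it already holds that value), and the
   final stack pointer restore below adds the remaining 0x2340 as an
   immediate.  */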
8189
8190 /* From now on, things must be in order. So emit blockages. */
8191
8192 /* Restore the frame pointer. */
8193 if (TARGET_ABI_UNICOSMK)
8194 {
8195 emit_insn (gen_blockage ());
8196 mem = gen_rtx_MEM (DImode,
8197 plus_constant (hard_frame_pointer_rtx, -16));
8198 set_mem_alias_set (mem, alpha_sr_alias_set);
8199 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8200 }
8201 else if (fp_is_frame_pointer)
8202 {
8203 emit_insn (gen_blockage ());
8204 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8205 set_mem_alias_set (mem, alpha_sr_alias_set);
8206 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8207 }
8208 else if (TARGET_ABI_OPEN_VMS)
8209 {
8210 emit_insn (gen_blockage ());
8211 FRP (emit_move_insn (hard_frame_pointer_rtx,
8212 gen_rtx_REG (DImode, vms_save_fp_regno)));
8213 }
8214
8215 /* Restore the stack pointer. */
8216 emit_insn (gen_blockage ());
8217 if (sp_adj2 == const0_rtx)
8218 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8219 else
8220 FRP (emit_move_insn (stack_pointer_rtx,
8221 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8222 }
8223 else
8224 {
8225 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8226 {
8227 emit_insn (gen_blockage ());
8228 FRP (emit_move_insn (hard_frame_pointer_rtx,
8229 gen_rtx_REG (DImode, vms_save_fp_regno)));
8230 }
8231 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8232 {
8233 /* Decrement the frame pointer if the function does not have a
8234 frame. */
8235
8236 emit_insn (gen_blockage ());
8237 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8238 hard_frame_pointer_rtx, constm1_rtx)));
8239 }
8240 }
8241 }
8242 \f
8243 /* Output the rest of the textual info surrounding the epilogue. */
8244
8245 void
8246 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8247 {
8248 #if TARGET_ABI_OPEN_VMS
8249 alpha_write_linkage (file, fnname, decl);
8250 #endif
8251
8252 /* End the function. */
8253 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8254 {
8255 fputs ("\t.end ", file);
8256 assemble_name (file, fnname);
8257 putc ('\n', file);
8258 }
8259 inside_function = FALSE;
8260
8261 /* Output jump tables and the static subroutine information block. */
8262 if (TARGET_ABI_UNICOSMK)
8263 {
8264 unicosmk_output_ssib (file, fnname);
8265 unicosmk_output_deferred_case_vectors (file);
8266 }
8267 }
8268
8269 #if TARGET_ABI_OSF
8270 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8271
8272 In order to avoid the hordes of differences between generated code
8273 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8274 lots of code loading up large constants, generate rtl and emit it
8275 instead of going straight to text.
8276
8277 Not sure why this idea hasn't been explored before... */
8278
8279 static void
8280 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8281 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8282 tree function)
8283 {
8284 HOST_WIDE_INT hi, lo;
8285 rtx this, insn, funexp;
8286
8287 reset_block_changes ();
8288
8289 /* We always require a valid GP. */
8290 emit_insn (gen_prologue_ldgp ());
8291 emit_note (NOTE_INSN_PROLOGUE_END);
8292
8293 /* Find the "this" pointer. If the function returns a structure,
8294 the structure return pointer is in $16. */
8295 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8296 this = gen_rtx_REG (Pmode, 17);
8297 else
8298 this = gen_rtx_REG (Pmode, 16);
8299
8300 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8301 entire constant for the add. */
8302 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8303 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8304 if (hi + lo == delta)
8305 {
8306 if (hi)
8307 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8308 if (lo)
8309 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8310 }
8311 else
8312 {
8313 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8314 delta, -(delta < 0));
8315 emit_insn (gen_adddi3 (this, this, tmp));
8316 }
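/* The split above is the standard ldah/lda decomposition.  A purely
   illustrative, never-compiled sketch with a hypothetical delta follows;
   the names d, l and h are made up for the example.  */
#if 0
  {
    HOST_WIDE_INT d = 0x18008;
    HOST_WIDE_INT l = ((d & 0xffff) ^ 0x8000) - 0x8000;
    /* l == -32760: the low 16 bits, sign-extended, fit an lda immediate.  */
    HOST_WIDE_INT h = (((d - l) & 0xffffffff) ^ 0x80000000) - 0x80000000;
    /* h == 0x20000: a multiple of 0x10000, which a single ldah can add.  */
    gcc_assert (h + l == d);
  }
#endif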
8317
8318 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8319 if (vcall_offset)
8320 {
8321 rtx tmp, tmp2;
8322
8323 tmp = gen_rtx_REG (Pmode, 0);
8324 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8325
8326 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8327 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8328 if (hi + lo == vcall_offset)
8329 {
8330 if (hi)
8331 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8332 }
8333 else
8334 {
8335 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8336 vcall_offset, -(vcall_offset < 0));
8337 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8338 lo = 0;
8339 }
8340 if (lo)
8341 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8342 else
8343 tmp2 = tmp;
8344 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8345
8346 emit_insn (gen_adddi3 (this, this, tmp));
8347 }
8348
8349 /* Generate a tail call to the target function. */
8350 if (! TREE_USED (function))
8351 {
8352 assemble_external (function);
8353 TREE_USED (function) = 1;
8354 }
8355 funexp = XEXP (DECL_RTL (function), 0);
8356 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8357 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8358 SIBLING_CALL_P (insn) = 1;
8359
8360 /* Run just enough of rest_of_compilation to get the insns emitted.
8361 There's not really enough bulk here to make other passes such as
8362 instruction scheduling worth while. Note that use_thunk calls
8363 assemble_start_function and assemble_end_function. */
8364 insn = get_insns ();
8365 insn_locators_initialize ();
8366 shorten_branches (insn);
8367 final_start_function (insn, file, 1);
8368 final (insn, file, 1);
8369 final_end_function ();
8370 }
8371 #endif /* TARGET_ABI_OSF */
8372 \f
8373 /* Debugging support. */
8374
8375 #include "gstab.h"
8376
8377 /* Count the number of sdb-related labels that are generated (to find
8378 block start and end boundaries). */
8379
8380 int sdb_label_count = 0;
8381
8382 /* Name of the file containing the current function. */
8383
8384 static const char *current_function_file = "";
8385
8386 /* Offsets to alpha virtual arg/local debugging pointers. */
8387
8388 long alpha_arg_offset;
8389 long alpha_auto_offset;
8390 \f
8391 /* Emit a new filename to a stream. */
8392
8393 void
8394 alpha_output_filename (FILE *stream, const char *name)
8395 {
8396 static int first_time = TRUE;
8397
8398 if (first_time)
8399 {
8400 first_time = FALSE;
8401 ++num_source_filenames;
8402 current_function_file = name;
8403 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8404 output_quoted_string (stream, name);
8405 fprintf (stream, "\n");
8406 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8407 fprintf (stream, "\t#@stabs\n");
8408 }
8409
8410 else if (write_symbols == DBX_DEBUG)
8411 /* dbxout.c will emit an appropriate .stabs directive. */
8412 return;
8413
8414 else if (name != current_function_file
8415 && strcmp (name, current_function_file) != 0)
8416 {
8417 if (inside_function && ! TARGET_GAS)
8418 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8419 else
8420 {
8421 ++num_source_filenames;
8422 current_function_file = name;
8423 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8424 }
8425
8426 output_quoted_string (stream, name);
8427 fprintf (stream, "\n");
8428 }
8429 }
8430 \f
8431 /* Structure to show the current status of registers and memory. */
8432
8433 struct shadow_summary
8434 {
8435 struct {
8436 unsigned int i : 31; /* Mask of int regs */
8437 unsigned int fp : 31; /* Mask of fp regs */
8438 unsigned int mem : 1; /* mem == imem | fpmem */
8439 } used, defd;
8440 };
8441
8442 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8443 to the summary structure. SET is nonzero if the insn is setting the
8444 object, otherwise zero. */
8445
8446 static void
8447 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8448 {
8449 const char *format_ptr;
8450 int i, j;
8451
8452 if (x == 0)
8453 return;
8454
8455 switch (GET_CODE (x))
8456 {
8457 /* ??? Note that this case would be incorrect if the Alpha had a
8458 ZERO_EXTRACT in SET_DEST. */
8459 case SET:
8460 summarize_insn (SET_SRC (x), sum, 0);
8461 summarize_insn (SET_DEST (x), sum, 1);
8462 break;
8463
8464 case CLOBBER:
8465 summarize_insn (XEXP (x, 0), sum, 1);
8466 break;
8467
8468 case USE:
8469 summarize_insn (XEXP (x, 0), sum, 0);
8470 break;
8471
8472 case ASM_OPERANDS:
8473 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8474 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8475 break;
8476
8477 case PARALLEL:
8478 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8479 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8480 break;
8481
8482 case SUBREG:
8483 summarize_insn (SUBREG_REG (x), sum, 0);
8484 break;
8485
8486 case REG:
8487 {
8488 int regno = REGNO (x);
8489 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8490
8491 if (regno == 31 || regno == 63)
8492 break;
8493
8494 if (set)
8495 {
8496 if (regno < 32)
8497 sum->defd.i |= mask;
8498 else
8499 sum->defd.fp |= mask;
8500 }
8501 else
8502 {
8503 if (regno < 32)
8504 sum->used.i |= mask;
8505 else
8506 sum->used.fp |= mask;
8507 }
8508 }
8509 break;
8510
8511 case MEM:
8512 if (set)
8513 sum->defd.mem = 1;
8514 else
8515 sum->used.mem = 1;
8516
8517 /* Find the regs used in memory address computation: */
8518 summarize_insn (XEXP (x, 0), sum, 0);
8519 break;
8520
8521 case CONST_INT: case CONST_DOUBLE:
8522 case SYMBOL_REF: case LABEL_REF: case CONST:
8523 case SCRATCH: case ASM_INPUT:
8524 break;
8525
8526 /* Handle common unary and binary ops for efficiency. */
8527 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8528 case MOD: case UDIV: case UMOD: case AND: case IOR:
8529 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8530 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8531 case NE: case EQ: case GE: case GT: case LE:
8532 case LT: case GEU: case GTU: case LEU: case LTU:
8533 summarize_insn (XEXP (x, 0), sum, 0);
8534 summarize_insn (XEXP (x, 1), sum, 0);
8535 break;
8536
8537 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8538 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8539 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8540 case SQRT: case FFS:
8541 summarize_insn (XEXP (x, 0), sum, 0);
8542 break;
8543
8544 default:
8545 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8546 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8547 switch (format_ptr[i])
8548 {
8549 case 'e':
8550 summarize_insn (XEXP (x, i), sum, 0);
8551 break;
8552
8553 case 'E':
8554 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8555 summarize_insn (XVECEXP (x, i, j), sum, 0);
8556 break;
8557
8558 case 'i':
8559 break;
8560
8561 default:
8562 gcc_unreachable ();
8563 }
8564 }
8565 }
8566
8567 /* Ensure a sufficient number of `trapb' insns are in the code when
8568 the user requests code with a trap precision of functions or
8569 instructions.
8570
8571 In naive mode, when the user requests a trap-precision of
8572 "instruction", a trapb is needed after every instruction that may
8573 generate a trap. This ensures that the code is resumption safe but
8574 it is also slow.
8575
8576 When optimizations are turned on, we delay issuing a trapb as long
8577 as possible. In this context, a trap shadow is the sequence of
8578 instructions that starts with a (potentially) trap generating
8579 instruction and extends to the next trapb or call_pal instruction
8580 (but GCC never generates call_pal by itself). We can delay (and
8581 therefore sometimes omit) a trapb subject to the following
8582 conditions:
8583
8584 (a) On entry to the trap shadow, if any Alpha register or memory
8585 location contains a value that is used as an operand value by some
8586 instruction in the trap shadow (live on entry), then no instruction
8587 in the trap shadow may modify the register or memory location.
8588
8589 (b) Within the trap shadow, the computation of the base register
8590 for a memory load or store instruction may not involve using the
8591 result of an instruction that might generate an UNPREDICTABLE
8592 result.
8593
8594 (c) Within the trap shadow, no register may be used more than once
8595 as a destination register. (This is to make life easier for the
8596 trap-handler.)
8597
8598 (d) The trap shadow may not include any branch instructions. */
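/* A hypothetical shadow, purely for illustration (the actual sequences
   GCC emits will differ):

	addt	$f1,$f2,$f10	# may trap: opens a shadow
	mult	$f3,$f4,$f11	# ok: fresh destination, inputs untouched
	trapb			# the next insn would reuse $f10 as a
	addt	$f10,$f5,$f10	# destination, violating (c), so the
				# shadow is closed first.  */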
8599
8600 static void
8601 alpha_handle_trap_shadows (void)
8602 {
8603 struct shadow_summary shadow;
8604 int trap_pending, exception_nesting;
8605 rtx i, n;
8606
8607 trap_pending = 0;
8608 exception_nesting = 0;
8609 shadow.used.i = 0;
8610 shadow.used.fp = 0;
8611 shadow.used.mem = 0;
8612 shadow.defd = shadow.used;
8613
8614 for (i = get_insns (); i ; i = NEXT_INSN (i))
8615 {
8616 if (GET_CODE (i) == NOTE)
8617 {
8618 switch (NOTE_LINE_NUMBER (i))
8619 {
8620 case NOTE_INSN_EH_REGION_BEG:
8621 exception_nesting++;
8622 if (trap_pending)
8623 goto close_shadow;
8624 break;
8625
8626 case NOTE_INSN_EH_REGION_END:
8627 exception_nesting--;
8628 if (trap_pending)
8629 goto close_shadow;
8630 break;
8631
8632 case NOTE_INSN_EPILOGUE_BEG:
8633 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8634 goto close_shadow;
8635 break;
8636 }
8637 }
8638 else if (trap_pending)
8639 {
8640 if (alpha_tp == ALPHA_TP_FUNC)
8641 {
8642 if (GET_CODE (i) == JUMP_INSN
8643 && GET_CODE (PATTERN (i)) == RETURN)
8644 goto close_shadow;
8645 }
8646 else if (alpha_tp == ALPHA_TP_INSN)
8647 {
8648 if (optimize > 0)
8649 {
8650 struct shadow_summary sum;
8651
8652 sum.used.i = 0;
8653 sum.used.fp = 0;
8654 sum.used.mem = 0;
8655 sum.defd = sum.used;
8656
8657 switch (GET_CODE (i))
8658 {
8659 case INSN:
8660 /* Annoyingly, get_attr_trap will die on these. */
8661 if (GET_CODE (PATTERN (i)) == USE
8662 || GET_CODE (PATTERN (i)) == CLOBBER)
8663 break;
8664
8665 summarize_insn (PATTERN (i), &sum, 0);
8666
8667 if ((sum.defd.i & shadow.defd.i)
8668 || (sum.defd.fp & shadow.defd.fp))
8669 {
8670 /* (c) would be violated */
8671 goto close_shadow;
8672 }
8673
8674 /* Combine shadow with summary of current insn: */
8675 shadow.used.i |= sum.used.i;
8676 shadow.used.fp |= sum.used.fp;
8677 shadow.used.mem |= sum.used.mem;
8678 shadow.defd.i |= sum.defd.i;
8679 shadow.defd.fp |= sum.defd.fp;
8680 shadow.defd.mem |= sum.defd.mem;
8681
8682 if ((sum.defd.i & shadow.used.i)
8683 || (sum.defd.fp & shadow.used.fp)
8684 || (sum.defd.mem & shadow.used.mem))
8685 {
8686 /* (a) would be violated (also takes care of (b)) */
8687 gcc_assert (get_attr_trap (i) != TRAP_YES
8688 || (!(sum.defd.i & sum.used.i)
8689 && !(sum.defd.fp & sum.used.fp)));
8690
8691 goto close_shadow;
8692 }
8693 break;
8694
8695 case JUMP_INSN:
8696 case CALL_INSN:
8697 case CODE_LABEL:
8698 goto close_shadow;
8699
8700 default:
8701 gcc_unreachable ();
8702 }
8703 }
8704 else
8705 {
8706 close_shadow:
8707 n = emit_insn_before (gen_trapb (), i);
8708 PUT_MODE (n, TImode);
8709 PUT_MODE (i, TImode);
8710 trap_pending = 0;
8711 shadow.used.i = 0;
8712 shadow.used.fp = 0;
8713 shadow.used.mem = 0;
8714 shadow.defd = shadow.used;
8715 }
8716 }
8717 }
8718
8719 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8720 && GET_CODE (i) == INSN
8721 && GET_CODE (PATTERN (i)) != USE
8722 && GET_CODE (PATTERN (i)) != CLOBBER
8723 && get_attr_trap (i) == TRAP_YES)
8724 {
8725 if (optimize && !trap_pending)
8726 summarize_insn (PATTERN (i), &shadow, 0);
8727 trap_pending = 1;
8728 }
8729 }
8730 }
8731 \f
8732 /* Alpha can only issue instruction groups simultaneously if they are
8733 suitably aligned. This is very processor-specific. */
8734 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8735 that are marked "fake". These instructions do not exist on that target,
8736 but it is possible to see these insns with deranged combinations of
8737 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8738 choose a result at random. */
8739
8740 enum alphaev4_pipe {
8741 EV4_STOP = 0,
8742 EV4_IB0 = 1,
8743 EV4_IB1 = 2,
8744 EV4_IBX = 4
8745 };
8746
8747 enum alphaev5_pipe {
8748 EV5_STOP = 0,
8749 EV5_NONE = 1,
8750 EV5_E01 = 2,
8751 EV5_E0 = 4,
8752 EV5_E1 = 8,
8753 EV5_FAM = 16,
8754 EV5_FA = 32,
8755 EV5_FM = 64
8756 };
8757
8758 static enum alphaev4_pipe
8759 alphaev4_insn_pipe (rtx insn)
8760 {
8761 if (recog_memoized (insn) < 0)
8762 return EV4_STOP;
8763 if (get_attr_length (insn) != 4)
8764 return EV4_STOP;
8765
8766 switch (get_attr_type (insn))
8767 {
8768 case TYPE_ILD:
8769 case TYPE_LDSYM:
8770 case TYPE_FLD:
8771 case TYPE_LD_L:
8772 return EV4_IBX;
8773
8774 case TYPE_IADD:
8775 case TYPE_ILOG:
8776 case TYPE_ICMOV:
8777 case TYPE_ICMP:
8778 case TYPE_FST:
8779 case TYPE_SHIFT:
8780 case TYPE_IMUL:
8781 case TYPE_FBR:
8782 case TYPE_MVI: /* fake */
8783 return EV4_IB0;
8784
8785 case TYPE_IST:
8786 case TYPE_MISC:
8787 case TYPE_IBR:
8788 case TYPE_JSR:
8789 case TYPE_CALLPAL:
8790 case TYPE_FCPYS:
8791 case TYPE_FCMOV:
8792 case TYPE_FADD:
8793 case TYPE_FDIV:
8794 case TYPE_FMUL:
8795 case TYPE_ST_C:
8796 case TYPE_MB:
8797 case TYPE_FSQRT: /* fake */
8798 case TYPE_FTOI: /* fake */
8799 case TYPE_ITOF: /* fake */
8800 return EV4_IB1;
8801
8802 default:
8803 gcc_unreachable ();
8804 }
8805 }
8806
8807 static enum alphaev5_pipe
8808 alphaev5_insn_pipe (rtx insn)
8809 {
8810 if (recog_memoized (insn) < 0)
8811 return EV5_STOP;
8812 if (get_attr_length (insn) != 4)
8813 return EV5_STOP;
8814
8815 switch (get_attr_type (insn))
8816 {
8817 case TYPE_ILD:
8818 case TYPE_FLD:
8819 case TYPE_LDSYM:
8820 case TYPE_IADD:
8821 case TYPE_ILOG:
8822 case TYPE_ICMOV:
8823 case TYPE_ICMP:
8824 return EV5_E01;
8825
8826 case TYPE_IST:
8827 case TYPE_FST:
8828 case TYPE_SHIFT:
8829 case TYPE_IMUL:
8830 case TYPE_MISC:
8831 case TYPE_MVI:
8832 case TYPE_LD_L:
8833 case TYPE_ST_C:
8834 case TYPE_MB:
8835 case TYPE_FTOI: /* fake */
8836 case TYPE_ITOF: /* fake */
8837 return EV5_E0;
8838
8839 case TYPE_IBR:
8840 case TYPE_JSR:
8841 case TYPE_CALLPAL:
8842 return EV5_E1;
8843
8844 case TYPE_FCPYS:
8845 return EV5_FAM;
8846
8847 case TYPE_FBR:
8848 case TYPE_FCMOV:
8849 case TYPE_FADD:
8850 case TYPE_FDIV:
8851 case TYPE_FSQRT: /* fake */
8852 return EV5_FA;
8853
8854 case TYPE_FMUL:
8855 return EV5_FM;
8856
8857 default:
8858 gcc_unreachable ();
8859 }
8860 }
8861
8862 /* IN_USE is a mask of the slots currently filled within the insn group.
8863 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8864 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8865
8866 LEN is, of course, the length of the group in bytes. */
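/* For example, in a hypothetical group an integer load (EV4_IBX) seen
   first sets EV4_IB0 | EV4_IBX; if an add (EV4_IB0) follows, the IBX bit
   lets the load be treated as having issued down IB1 instead, so EV4_IB1
   is marked as well and the group fills up with LEN == 8.  */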
8867
8868 static rtx
8869 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8870 {
8871 int len, in_use;
8872
8873 len = in_use = 0;
8874
8875 if (! INSN_P (insn)
8876 || GET_CODE (PATTERN (insn)) == CLOBBER
8877 || GET_CODE (PATTERN (insn)) == USE)
8878 goto next_and_done;
8879
8880 while (1)
8881 {
8882 enum alphaev4_pipe pipe;
8883
8884 pipe = alphaev4_insn_pipe (insn);
8885 switch (pipe)
8886 {
8887 case EV4_STOP:
8888 /* Force complex instructions to start new groups. */
8889 if (in_use)
8890 goto done;
8891
8892 /* If this is a completely unrecognized insn, it's an asm.
8893 We don't know how long it is, so record length as -1 to
8894 signal a needed realignment. */
8895 if (recog_memoized (insn) < 0)
8896 len = -1;
8897 else
8898 len = get_attr_length (insn);
8899 goto next_and_done;
8900
8901 case EV4_IBX:
8902 if (in_use & EV4_IB0)
8903 {
8904 if (in_use & EV4_IB1)
8905 goto done;
8906 in_use |= EV4_IB1;
8907 }
8908 else
8909 in_use |= EV4_IB0 | EV4_IBX;
8910 break;
8911
8912 case EV4_IB0:
8913 if (in_use & EV4_IB0)
8914 {
8915 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8916 goto done;
8917 in_use |= EV4_IB1;
8918 }
8919 in_use |= EV4_IB0;
8920 break;
8921
8922 case EV4_IB1:
8923 if (in_use & EV4_IB1)
8924 goto done;
8925 in_use |= EV4_IB1;
8926 break;
8927
8928 default:
8929 gcc_unreachable ();
8930 }
8931 len += 4;
8932
8933 /* Haifa doesn't do well scheduling branches. */
8934 if (GET_CODE (insn) == JUMP_INSN)
8935 goto next_and_done;
8936
8937 next:
8938 insn = next_nonnote_insn (insn);
8939
8940 if (!insn || ! INSN_P (insn))
8941 goto done;
8942
8943 /* Let Haifa tell us where it thinks insn group boundaries are. */
8944 if (GET_MODE (insn) == TImode)
8945 goto done;
8946
8947 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8948 goto next;
8949 }
8950
8951 next_and_done:
8952 insn = next_nonnote_insn (insn);
8953
8954 done:
8955 *plen = len;
8956 *pin_use = in_use;
8957 return insn;
8958 }
8959
8960 /* IN_USE is a mask of the slots currently filled within the insn group.
8961 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8962 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8963
8964 LEN is, of course, the length of the group in bytes. */
8965
8966 static rtx
8967 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8968 {
8969 int len, in_use;
8970
8971 len = in_use = 0;
8972
8973 if (! INSN_P (insn)
8974 || GET_CODE (PATTERN (insn)) == CLOBBER
8975 || GET_CODE (PATTERN (insn)) == USE)
8976 goto next_and_done;
8977
8978 while (1)
8979 {
8980 enum alphaev5_pipe pipe;
8981
8982 pipe = alphaev5_insn_pipe (insn);
8983 switch (pipe)
8984 {
8985 case EV5_STOP:
8986 /* Force complex instructions to start new groups. */
8987 if (in_use)
8988 goto done;
8989
8990 /* If this is a completely unrecognized insn, it's an asm.
8991 We don't know how long it is, so record length as -1 to
8992 signal a needed realignment. */
8993 if (recog_memoized (insn) < 0)
8994 len = -1;
8995 else
8996 len = get_attr_length (insn);
8997 goto next_and_done;
8998
8999 /* ??? In most of the cases below, we would like to assert that they
9000 never happen, as that would indicate an error either in Haifa or
9001 in the scheduling description. Unfortunately, Haifa never
9002 schedules the last instruction of the BB, so we don't have
9003 an accurate TI bit to go off of. */
9004 case EV5_E01:
9005 if (in_use & EV5_E0)
9006 {
9007 if (in_use & EV5_E1)
9008 goto done;
9009 in_use |= EV5_E1;
9010 }
9011 else
9012 in_use |= EV5_E0 | EV5_E01;
9013 break;
9014
9015 case EV5_E0:
9016 if (in_use & EV5_E0)
9017 {
9018 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9019 goto done;
9020 in_use |= EV5_E1;
9021 }
9022 in_use |= EV5_E0;
9023 break;
9024
9025 case EV5_E1:
9026 if (in_use & EV5_E1)
9027 goto done;
9028 in_use |= EV5_E1;
9029 break;
9030
9031 case EV5_FAM:
9032 if (in_use & EV5_FA)
9033 {
9034 if (in_use & EV5_FM)
9035 goto done;
9036 in_use |= EV5_FM;
9037 }
9038 else
9039 in_use |= EV5_FA | EV5_FAM;
9040 break;
9041
9042 case EV5_FA:
9043 if (in_use & EV5_FA)
9044 goto done;
9045 in_use |= EV5_FA;
9046 break;
9047
9048 case EV5_FM:
9049 if (in_use & EV5_FM)
9050 goto done;
9051 in_use |= EV5_FM;
9052 break;
9053
9054 case EV5_NONE:
9055 break;
9056
9057 default:
9058 gcc_unreachable ();
9059 }
9060 len += 4;
9061
9062 /* Haifa doesn't do well scheduling branches. */
9063 /* ??? If this is predicted not-taken, slotting continues, except
9064 that no more IBR, FBR, or JSR insns may be slotted. */
9065 if (GET_CODE (insn) == JUMP_INSN)
9066 goto next_and_done;
9067
9068 next:
9069 insn = next_nonnote_insn (insn);
9070
9071 if (!insn || ! INSN_P (insn))
9072 goto done;
9073
9074 /* Let Haifa tell us where it thinks insn group boundaries are. */
9075 if (GET_MODE (insn) == TImode)
9076 goto done;
9077
9078 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9079 goto next;
9080 }
9081
9082 next_and_done:
9083 insn = next_nonnote_insn (insn);
9084
9085 done:
9086 *plen = len;
9087 *pin_use = in_use;
9088 return insn;
9089 }
9090
9091 static rtx
9092 alphaev4_next_nop (int *pin_use)
9093 {
9094 int in_use = *pin_use;
9095 rtx nop;
9096
9097 if (!(in_use & EV4_IB0))
9098 {
9099 in_use |= EV4_IB0;
9100 nop = gen_nop ();
9101 }
9102 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9103 {
9104 in_use |= EV4_IB1;
9105 nop = gen_nop ();
9106 }
9107 else if (TARGET_FP && !(in_use & EV4_IB1))
9108 {
9109 in_use |= EV4_IB1;
9110 nop = gen_fnop ();
9111 }
9112 else
9113 nop = gen_unop ();
9114
9115 *pin_use = in_use;
9116 return nop;
9117 }
9118
9119 static rtx
9120 alphaev5_next_nop (int *pin_use)
9121 {
9122 int in_use = *pin_use;
9123 rtx nop;
9124
9125 if (!(in_use & EV5_E1))
9126 {
9127 in_use |= EV5_E1;
9128 nop = gen_nop ();
9129 }
9130 else if (TARGET_FP && !(in_use & EV5_FA))
9131 {
9132 in_use |= EV5_FA;
9133 nop = gen_fnop ();
9134 }
9135 else if (TARGET_FP && !(in_use & EV5_FM))
9136 {
9137 in_use |= EV5_FM;
9138 nop = gen_fnop ();
9139 }
9140 else
9141 nop = gen_unop ();
9142
9143 *pin_use = in_use;
9144 return nop;
9145 }
9146
9147 /* The instruction group alignment main loop. */
9148
9149 static void
9150 alpha_align_insns (unsigned int max_align,
9151 rtx (*next_group) (rtx, int *, int *),
9152 rtx (*next_nop) (int *))
9153 {
9154 /* ALIGN is the known alignment for the insn group. */
9155 unsigned int align;
9156 /* OFS is the offset of the current insn in the insn group. */
9157 int ofs;
9158 int prev_in_use, in_use, len, ldgp;
9159 rtx i, next;
9160
9161 /* Let shorten_branches take care of assigning alignments to code labels. */
9162 shorten_branches (get_insns ());
9163
9164 if (align_functions < 4)
9165 align = 4;
9166 else if ((unsigned int) align_functions < max_align)
9167 align = align_functions;
9168 else
9169 align = max_align;
9170
9171 ofs = prev_in_use = 0;
9172 i = get_insns ();
9173 if (GET_CODE (i) == NOTE)
9174 i = next_nonnote_insn (i);
9175
9176 ldgp = alpha_function_needs_gp ? 8 : 0;
9177
9178 while (i)
9179 {
9180 next = (*next_group) (i, &in_use, &len);
9181
9182 /* When we see a label, resync alignment etc. */
9183 if (GET_CODE (i) == CODE_LABEL)
9184 {
9185 unsigned int new_align = 1 << label_to_alignment (i);
9186
9187 if (new_align >= align)
9188 {
9189 align = new_align < max_align ? new_align : max_align;
9190 ofs = 0;
9191 }
9192
9193 else if (ofs & (new_align-1))
9194 ofs = (ofs | (new_align-1)) + 1;
9195 gcc_assert (!len);
9196 }
9197
9198 /* Handle complex instructions specially. */
9199 else if (in_use == 0)
9200 {
9201 /* Asms will have length < 0. This is a signal that we have
9202 lost alignment knowledge. Assume, however, that the asm
9203 will not mis-align instructions. */
9204 if (len < 0)
9205 {
9206 ofs = 0;
9207 align = 4;
9208 len = 0;
9209 }
9210 }
9211
9212 /* If the known alignment is smaller than the recognized insn group,
9213 realign the output. */
9214 else if ((int) align < len)
9215 {
9216 unsigned int new_log_align = len > 8 ? 4 : 3;
9217 rtx prev, where;
9218
9219 where = prev = prev_nonnote_insn (i);
9220 if (!where || GET_CODE (where) != CODE_LABEL)
9221 where = i;
9222
9223 /* Can't realign between a call and its gp reload. */
9224 if (! (TARGET_EXPLICIT_RELOCS
9225 && prev && GET_CODE (prev) == CALL_INSN))
9226 {
9227 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9228 align = 1 << new_log_align;
9229 ofs = 0;
9230 }
9231 }
9232
9233 /* We may not insert padding inside the initial ldgp sequence. */
9234 else if (ldgp > 0)
9235 ldgp -= len;
9236
9237 /* If the group won't fit in the same INT16 as the previous,
9238 we need to add padding to keep the group together. Rather
9239 than simply leaving the insn filling to the assembler, we
9240 can make use of the knowledge of what sorts of instructions
9241 were issued in the previous group to make sure that all of
9242 the added nops are really free. */
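/* For instance, with a hypothetical alignment of 16, an offset of 8 and
   a 12-byte group, the group would straddle the boundary, so
   (align - ofs) / 4 == 2 nops are emitted -- drawn from whatever pipes
   the previous group left idle, via *next_nop -- and ofs is reset to 0.  */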
9243 else if (ofs + len > (int) align)
9244 {
9245 int nop_count = (align - ofs) / 4;
9246 rtx where;
9247
9248 /* Insert nops before labels, branches, and calls to truly merge
9249 the execution of the nops with the previous instruction group. */
9250 where = prev_nonnote_insn (i);
9251 if (where)
9252 {
9253 if (GET_CODE (where) == CODE_LABEL)
9254 {
9255 rtx where2 = prev_nonnote_insn (where);
9256 if (where2 && GET_CODE (where2) == JUMP_INSN)
9257 where = where2;
9258 }
9259 else if (GET_CODE (where) == INSN)
9260 where = i;
9261 }
9262 else
9263 where = i;
9264
9265 do
9266 emit_insn_before ((*next_nop)(&prev_in_use), where);
9267 while (--nop_count);
9268 ofs = 0;
9269 }
9270
9271 ofs = (ofs + len) & (align - 1);
9272 prev_in_use = in_use;
9273 i = next;
9274 }
9275 }
9276 \f
9277 /* Machine dependent reorg pass. */
9278
9279 static void
9280 alpha_reorg (void)
9281 {
9282 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9283 alpha_handle_trap_shadows ();
9284
9285 /* Due to the number of extra trapb insns, don't bother fixing up
9286 alignment when trap precision is instruction. Moreover, we can
9287 only do our job when sched2 is run. */
9288 if (optimize && !optimize_size
9289 && alpha_tp != ALPHA_TP_INSN
9290 && flag_schedule_insns_after_reload)
9291 {
9292 if (alpha_tune == PROCESSOR_EV4)
9293 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9294 else if (alpha_tune == PROCESSOR_EV5)
9295 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9296 }
9297 }
9298 \f
9299 #if !TARGET_ABI_UNICOSMK
9300
9301 #ifdef HAVE_STAMP_H
9302 #include <stamp.h>
9303 #endif
9304
9305 static void
9306 alpha_file_start (void)
9307 {
9308 #ifdef OBJECT_FORMAT_ELF
9309 /* If emitting dwarf2 debug information, we cannot generate a .file
9310 directive to start the file, as it will conflict with dwarf2out
9311 file numbers. So it's only useful when emitting mdebug output. */
9312 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9313 #endif
9314
9315 default_file_start ();
9316 #ifdef MS_STAMP
9317 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9318 #endif
9319
9320 fputs ("\t.set noreorder\n", asm_out_file);
9321 fputs ("\t.set volatile\n", asm_out_file);
9322 if (!TARGET_ABI_OPEN_VMS)
9323 fputs ("\t.set noat\n", asm_out_file);
9324 if (TARGET_EXPLICIT_RELOCS)
9325 fputs ("\t.set nomacro\n", asm_out_file);
9326 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9327 {
9328 const char *arch;
9329
9330 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9331 arch = "ev6";
9332 else if (TARGET_MAX)
9333 arch = "pca56";
9334 else if (TARGET_BWX)
9335 arch = "ev56";
9336 else if (alpha_cpu == PROCESSOR_EV5)
9337 arch = "ev5";
9338 else
9339 arch = "ev4";
9340
9341 fprintf (asm_out_file, "\t.arch %s\n", arch);
9342 }
9343 }
9344 #endif
9345
9346 #ifdef OBJECT_FORMAT_ELF
9347
9348 /* Switch to the section to which we should output X. The only thing
9349 special we do here is to honor small data. */
9350
9351 static void
9352 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9353 unsigned HOST_WIDE_INT align)
9354 {
9355 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9356 /* ??? Consider using mergeable sdata sections. */
9357 sdata_section ();
9358 else
9359 default_elf_select_rtx_section (mode, x, align);
9360 }
9361
9362 #endif /* OBJECT_FORMAT_ELF */
9363 \f
9364 /* Structure to collect function names for final output in link section. */
9365 /* Note that items marked with GTY can't be ifdef'ed out. */
9366
9367 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9368 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9369
9370 struct alpha_links GTY(())
9371 {
9372 int num;
9373 rtx linkage;
9374 enum links_kind lkind;
9375 enum reloc_kind rkind;
9376 };
9377
9378 struct alpha_funcs GTY(())
9379 {
9380 int num;
9381 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9382 links;
9383 };
9384
9385 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9386 splay_tree alpha_links_tree;
9387 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9388 splay_tree alpha_funcs_tree;
9389
9390 static GTY(()) int alpha_funcs_num;
9391
9392 #if TARGET_ABI_OPEN_VMS
9393
9394 /* Return the VMS argument type corresponding to MODE. */
9395
9396 enum avms_arg_type
9397 alpha_arg_type (enum machine_mode mode)
9398 {
9399 switch (mode)
9400 {
9401 case SFmode:
9402 return TARGET_FLOAT_VAX ? FF : FS;
9403 case DFmode:
9404 return TARGET_FLOAT_VAX ? FD : FT;
9405 default:
9406 return I64;
9407 }
9408 }
9409
9410 /* Return an rtx for an integer representing the VMS Argument Information
9411 register value. */
9412
9413 rtx
9414 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9415 {
9416 unsigned HOST_WIDE_INT regval = cum.num_args;
9417 int i;
9418
9419 for (i = 0; i < 6; i++)
9420 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9421
9422 return GEN_INT (regval);
9423 }
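/* As a worked example with hypothetical values: the argument count sits
   in the low bits, and each of the six 3-bit fields starting at bit 8
   holds the avms_arg_type code of the corresponding argument register;
   if the second argument (i == 1) had type code 4, it would contribute
   4 << 11, i.e. 0x2000, to the AI register value.  */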
9424 \f
9425 /* Make (or fake) .linkage entry for function call.
9426
9427 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9428
9429 Return a SYMBOL_REF rtx for the linkage. */
9430
9431 rtx
9432 alpha_need_linkage (const char *name, int is_local)
9433 {
9434 splay_tree_node node;
9435 struct alpha_links *al;
9436
9437 if (name[0] == '*')
9438 name++;
9439
9440 if (is_local)
9441 {
9442 struct alpha_funcs *cfaf;
9443
9444 if (!alpha_funcs_tree)
9445 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9446 splay_tree_compare_pointers);
9447
9448 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9449
9450 cfaf->links = 0;
9451 cfaf->num = ++alpha_funcs_num;
9452
9453 splay_tree_insert (alpha_funcs_tree,
9454 (splay_tree_key) current_function_decl,
9455 (splay_tree_value) cfaf);
9456 }
9457
9458 if (alpha_links_tree)
9459 {
9460 /* Is this name already defined? */
9461
9462 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9463 if (node)
9464 {
9465 al = (struct alpha_links *) node->value;
9466 if (is_local)
9467 {
9468 /* Defined here but external assumed. */
9469 if (al->lkind == KIND_EXTERN)
9470 al->lkind = KIND_LOCAL;
9471 }
9472 else
9473 {
9474 /* Used here but unused assumed. */
9475 if (al->lkind == KIND_UNUSED)
9476 al->lkind = KIND_LOCAL;
9477 }
9478 return al->linkage;
9479 }
9480 }
9481 else
9482 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9483
9484 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9485 name = ggc_strdup (name);
9486
9487 /* Assume external if no definition. */
9488 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9489
9490 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9491 get_identifier (name);
9492
9493 /* Construct a SYMBOL_REF for us to call. */
9494 {
9495 size_t name_len = strlen (name);
9496 char *linksym = alloca (name_len + 6);
9497 linksym[0] = '$';
9498 memcpy (linksym + 1, name, name_len);
9499 memcpy (linksym + 1 + name_len, "..lk", 5);
9500 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9501 ggc_alloc_string (linksym, name_len + 5));
9502 }
9503
9504 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9505 (splay_tree_value) al);
9506
9507 return al->linkage;
9508 }
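/* For example, a reference to a hypothetical routine `foo' creates the
   linkage symbol `$foo..lk'; the per-function variant built by
   alpha_use_linkage below additionally encodes the calling function's
   number, e.g. `$3..foo..lk'.  */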
9509
9510 rtx
9511 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9512 {
9513 splay_tree_node cfunnode;
9514 struct alpha_funcs *cfaf;
9515 struct alpha_links *al;
9516 const char *name = XSTR (linkage, 0);
9517
9518 cfaf = (struct alpha_funcs *) 0;
9519 al = (struct alpha_links *) 0;
9520
9521 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9522 cfaf = (struct alpha_funcs *) cfunnode->value;
9523
9524 if (cfaf->links)
9525 {
9526 splay_tree_node lnode;
9527
9528 /* Is this name already defined? */
9529
9530 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9531 if (lnode)
9532 al = (struct alpha_links *) lnode->value;
9533 }
9534 else
9535 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9536
9537 if (!al)
9538 {
9539 size_t name_len;
9540 size_t buflen;
9541 char buf [512];
9542 char *linksym;
9543 splay_tree_node node = 0;
9544 struct alpha_links *anl;
9545
9546 if (name[0] == '*')
9547 name++;
9548
9549 name_len = strlen (name);
9550
9551 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9552 al->num = cfaf->num;
9553
9554 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9555 if (node)
9556 {
9557 anl = (struct alpha_links *) node->value;
9558 al->lkind = anl->lkind;
9559 }
9560
9561 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9562 buflen = strlen (buf);
9563 linksym = alloca (buflen + 1);
9564 memcpy (linksym, buf, buflen + 1);
9565
9566 al->linkage = gen_rtx_SYMBOL_REF
9567 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9568
9569 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9570 (splay_tree_value) al);
9571 }
9572
9573 if (rflag)
9574 al->rkind = KIND_CODEADDR;
9575 else
9576 al->rkind = KIND_LINKAGE;
9577
9578 if (lflag)
9579 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9580 else
9581 return al->linkage;
9582 }
9583
9584 static int
9585 alpha_write_one_linkage (splay_tree_node node, void *data)
9586 {
9587 const char *const name = (const char *) node->key;
9588 struct alpha_links *link = (struct alpha_links *) node->value;
9589 FILE *stream = (FILE *) data;
9590
9591 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9592 if (link->rkind == KIND_CODEADDR)
9593 {
9594 if (link->lkind == KIND_LOCAL)
9595 {
9596 /* Local and used */
9597 fprintf (stream, "\t.quad %s..en\n", name);
9598 }
9599 else
9600 {
9601 /* External and used, request code address. */
9602 fprintf (stream, "\t.code_address %s\n", name);
9603 }
9604 }
9605 else
9606 {
9607 if (link->lkind == KIND_LOCAL)
9608 {
9609 /* Local and used, build linkage pair. */
9610 fprintf (stream, "\t.quad %s..en\n", name);
9611 fprintf (stream, "\t.quad %s\n", name);
9612 }
9613 else
9614 {
9615 /* External and used, request linkage pair. */
9616 fprintf (stream, "\t.linkage %s\n", name);
9617 }
9618 }
9619
9620 return 0;
9621 }
9622
9623 static void
9624 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9625 {
9626 splay_tree_node node;
9627 struct alpha_funcs *func;
9628
9629 link_section ();
9630 fprintf (stream, "\t.align 3\n");
9631 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9632 func = (struct alpha_funcs *) node->value;
9633
9634 fputs ("\t.name ", stream);
9635 assemble_name (stream, funname);
9636 fputs ("..na\n", stream);
9637 ASM_OUTPUT_LABEL (stream, funname);
9638 fprintf (stream, "\t.pdesc ");
9639 assemble_name (stream, funname);
9640 fprintf (stream, "..en,%s\n",
9641 alpha_procedure_type == PT_STACK ? "stack"
9642 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9643
9644 if (func->links)
9645 {
9646 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9647 /* splay_tree_delete (func->links); */
9648 }
9649 }
9650
9651 /* Given a decl, a section name, and whether the decl initializer
9652 has relocs, choose attributes for the section. */
9653
9654 #define SECTION_VMS_OVERLAY SECTION_FORGET
9655 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9656 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9657
9658 static unsigned int
9659 vms_section_type_flags (tree decl, const char *name, int reloc)
9660 {
9661 unsigned int flags = default_section_type_flags (decl, name, reloc);
9662
9663 if (decl && DECL_ATTRIBUTES (decl)
9664 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9665 flags |= SECTION_VMS_OVERLAY;
9666 if (decl && DECL_ATTRIBUTES (decl)
9667 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9668 flags |= SECTION_VMS_GLOBAL;
9669 if (decl && DECL_ATTRIBUTES (decl)
9670 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9671 flags |= SECTION_VMS_INITIALIZE;
9672
9673 return flags;
9674 }
9675
9676 /* Switch to an arbitrary section NAME with attributes as specified
9677 by FLAGS. ALIGN specifies any known alignment requirements for
9678 the section; 0 if the default should be used. */
9679
9680 static void
9681 vms_asm_named_section (const char *name, unsigned int flags,
9682 tree decl ATTRIBUTE_UNUSED)
9683 {
9684 fputc ('\n', asm_out_file);
9685 fprintf (asm_out_file, ".section\t%s", name);
9686
9687 if (flags & SECTION_VMS_OVERLAY)
9688 fprintf (asm_out_file, ",OVR");
9689 if (flags & SECTION_VMS_GLOBAL)
9690 fprintf (asm_out_file, ",GBL");
9691 if (flags & SECTION_VMS_INITIALIZE)
9692 fprintf (asm_out_file, ",NOMOD");
9693 if (flags & SECTION_DEBUG)
9694 fprintf (asm_out_file, ",NOWRT");
9695
9696 fputc ('\n', asm_out_file);
9697 }
9698
9699 /* Record an element in the table of global constructors. SYMBOL is
9700 a SYMBOL_REF of the function to be called; PRIORITY is a number
9701 between 0 and MAX_INIT_PRIORITY.
9702
9703 Differs from default_ctors_section_asm_out_constructor in that the
9704 width of the .ctors entry is always 64 bits, rather than the 32 bits
9705 used by a normal pointer. */
9706
9707 static void
9708 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9709 {
9710 ctors_section ();
9711 assemble_align (BITS_PER_WORD);
9712 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9713 }
9714
9715 static void
9716 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9717 {
9718 dtors_section ();
9719 assemble_align (BITS_PER_WORD);
9720 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9721 }
9722 #else
9723
9724 rtx
9725 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9726 int is_local ATTRIBUTE_UNUSED)
9727 {
9728 return NULL_RTX;
9729 }
9730
9731 rtx
9732 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9733 tree cfundecl ATTRIBUTE_UNUSED,
9734 int lflag ATTRIBUTE_UNUSED,
9735 int rflag ATTRIBUTE_UNUSED)
9736 {
9737 return NULL_RTX;
9738 }
9739
9740 #endif /* TARGET_ABI_OPEN_VMS */
9741 \f
9742 #if TARGET_ABI_UNICOSMK
9743
9744 /* This evaluates to true if we do not know how to pass TYPE solely in
9745 registers. This is the case for all arguments that do not fit in two
9746 registers. */
9747
9748 static bool
9749 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9750 {
9751 if (type == NULL)
9752 return false;
9753
9754 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9755 return true;
9756 if (TREE_ADDRESSABLE (type))
9757 return true;
9758
9759 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9760 }
9761
9762 /* Define the offset between two registers, one to be eliminated, and the
9763 other its replacement, at the start of a routine. */
9764
9765 int
9766 unicosmk_initial_elimination_offset (int from, int to)
9767 {
9768 int fixed_size;
9769
9770 fixed_size = alpha_sa_size();
9771 if (fixed_size != 0)
9772 fixed_size += 48;
9773
9774 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9775 return -fixed_size;
9776 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9777 return 0;
9778 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9779 return (ALPHA_ROUND (current_function_outgoing_args_size)
9780 + ALPHA_ROUND (get_frame_size()));
9781 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9782 return (ALPHA_ROUND (fixed_size)
9783 + ALPHA_ROUND (get_frame_size()
9784 + current_function_outgoing_args_size));
9785 else
9786 gcc_unreachable ();
9787 }
9788
9789 /* Output the module name for .ident and .end directives. We have to strip
9790 directories and make sure that the module name starts with a letter
9791 or '$'. */
9792
9793 static void
9794 unicosmk_output_module_name (FILE *file)
9795 {
9796 const char *name = lbasename (main_input_filename);
9797 unsigned len = strlen (name);
9798 char *clean_name = alloca (len + 2);
9799 char *ptr = clean_name;
9800
9801 /* CAM only accepts module names that start with a letter or '$'. We
9802 prefix the module name with a '$' if necessary. */
9803
9804 if (!ISALPHA (*name))
9805 *ptr++ = '$';
9806 memcpy (ptr, name, len + 1);
9807 clean_symbol_name (clean_name);
9808 fputs (clean_name, file);
9809 }
9810
9811 /* Output the definition of a common variable. */
9812
9813 void
9814 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9815 {
9816 tree name_tree;
9817 printf ("T3E__: common %s\n", name);
9818
9819 common_section ();
9820 fputs ("\t.endp\n\n\t.psect ", file);
9821 assemble_name (file, name);
9822 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9823 fprintf (file, "\t.byte\t0:%d\n", size);
9824
9825 /* Mark the symbol as defined in this module. */
9826 name_tree = get_identifier (name);
9827 TREE_ASM_WRITTEN (name_tree) = 1;
9828 }
9829
9830 #define SECTION_PUBLIC SECTION_MACH_DEP
9831 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9832 static int current_section_align;
9833
9834 static unsigned int
9835 unicosmk_section_type_flags (tree decl, const char *name,
9836 int reloc ATTRIBUTE_UNUSED)
9837 {
9838 unsigned int flags = default_section_type_flags (decl, name, reloc);
9839
9840 if (!decl)
9841 return flags;
9842
9843 if (TREE_CODE (decl) == FUNCTION_DECL)
9844 {
9845 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9846 if (align_functions_log > current_section_align)
9847 current_section_align = align_functions_log;
9848
9849 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9850 flags |= SECTION_MAIN;
9851 }
9852 else
9853 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9854
9855 if (TREE_PUBLIC (decl))
9856 flags |= SECTION_PUBLIC;
9857
9858 return flags;
9859 }
9860
9861 /* Generate a section name for decl and associate it with the
9862 declaration. */
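/* The naming scheme, as implemented below: functions go into "code@NAME",
   non-public data into "data@NAME", and public data keeps the bare NAME. */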
9863
9864 static void
9865 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9866 {
9867 const char *name;
9868 int len;
9869
9870 gcc_assert (decl);
9871
9872 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9873 name = default_strip_name_encoding (name);
9874 len = strlen (name);
9875
9876 if (TREE_CODE (decl) == FUNCTION_DECL)
9877 {
9878 char *string;
9879
9880 /* It is essential that we prefix the section name here because
9881 otherwise the section names generated for constructors and
9882 destructors confuse collect2. */
9883
9884 string = alloca (len + 6);
9885 sprintf (string, "code@%s", name);
9886 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9887 }
9888 else if (TREE_PUBLIC (decl))
9889 DECL_SECTION_NAME (decl) = build_string (len, name);
9890 else
9891 {
9892 char *string;
9893
9894 string = alloca (len + 6);
9895 sprintf (string, "data@%s", name);
9896 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9897 }
9898 }
9899
9900 /* Switch to an arbitrary section NAME with attributes as specified
9901 by FLAGS. ALIGN specifies any known alignment requirements for
9902 the section; 0 if the default should be used. */
9903
9904 static void
9905 unicosmk_asm_named_section (const char *name, unsigned int flags,
9906 tree decl ATTRIBUTE_UNUSED)
9907 {
9908 const char *kind;
9909
9910 /* Close the previous section. */
9911
9912 fputs ("\t.endp\n\n", asm_out_file);
9913
9914 /* Find out what kind of section we are opening. */
9915
9916 if (flags & SECTION_MAIN)
9917 fputs ("\t.start\tmain\n", asm_out_file);
9918
9919 if (flags & SECTION_CODE)
9920 kind = "code";
9921 else if (flags & SECTION_PUBLIC)
9922 kind = "common";
9923 else
9924 kind = "data";
9925
9926 if (current_section_align != 0)
9927 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9928 current_section_align, kind);
9929 else
9930 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9931 }
9932
9933 static void
9934 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9935 {
9936 if (DECL_P (decl)
9937 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9938 unicosmk_unique_section (decl, 0);
9939 }
9940
9941 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9942 in code sections because .align fills unused space with zeroes. */
9943
9944 void
9945 unicosmk_output_align (FILE *file, int align)
9946 {
9947 if (inside_function)
9948 fprintf (file, "\tgcc@code@align\t%d\n", align);
9949 else
9950 fprintf (file, "\t.align\t%d\n", align);
9951 }
9952
9953 /* Add a case vector to the current function's list of deferred case
9954 vectors. Case vectors have to be put into a separate section because CAM
9955 does not allow data definitions in code sections. */
9956
9957 void
9958 unicosmk_defer_case_vector (rtx lab, rtx vec)
9959 {
9960 struct machine_function *machine = cfun->machine;
9961
9962 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9963 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9964 machine->addr_list);
9965 }
9966
9967 /* Output a case vector. */
9968
9969 static void
9970 unicosmk_output_addr_vec (FILE *file, rtx vec)
9971 {
9972 rtx lab = XEXP (vec, 0);
9973 rtx body = XEXP (vec, 1);
9974 int vlen = XVECLEN (body, 0);
9975 int idx;
9976
9977 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9978
9979 for (idx = 0; idx < vlen; idx++)
9980 {
9981 ASM_OUTPUT_ADDR_VEC_ELT
9982 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9983 }
9984 }
9985
9986 /* Output current function's deferred case vectors. */
9987
9988 static void
9989 unicosmk_output_deferred_case_vectors (FILE *file)
9990 {
9991 struct machine_function *machine = cfun->machine;
9992 rtx t;
9993
9994 if (machine->addr_list == NULL_RTX)
9995 return;
9996
9997 data_section ();
9998 for (t = machine->addr_list; t; t = XEXP (t, 1))
9999 unicosmk_output_addr_vec (file, XEXP (t, 0));
10000 }
10001
10002 /* Generate the name of the SSIB section for the current function. */
10003
10004 #define SSIB_PREFIX "__SSIB_"
10005 #define SSIB_PREFIX_LEN 7
10006
10007 static const char *
10008 unicosmk_ssib_name (void)
10009 {
10010 /* A fixed 256-byte buffer is OK since CAM won't be able to deal with
10011 names longer than that anyway. */
10012
10013 static char name[256];
10014
10015 rtx x;
10016 const char *fnname;
10017 int len;
10018
10019 x = DECL_RTL (cfun->decl);
10020 gcc_assert (GET_CODE (x) == MEM);
10021 x = XEXP (x, 0);
10022 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10023 fnname = XSTR (x, 0);
10024
10025 len = strlen (fnname);
10026 if (len + SSIB_PREFIX_LEN > 255)
10027 len = 255 - SSIB_PREFIX_LEN;
10028
10029 strcpy (name, SSIB_PREFIX);
10030 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10031 name[len + SSIB_PREFIX_LEN] = 0;
10032
10033 return name;
10034 }
10035
10036 /* Set up the dynamic subprogram information block (DSIB) and update the
10037 frame pointer register ($15) for subroutines which have a frame. If the
10038 subroutine doesn't have a frame, simply increment $15. */
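/* The 64-byte block, as laid out by the stores below (a reading of this
   code, not an ABI reference):
     sp+56  return address ($26)
     sp+48  caller's frame pointer ($15)
     sp+32  pointer to the static SIB
     sp+24  CIW index ($25)
   The new frame pointer is sp + 64, i.e. the stack pointer value before
   the 64-byte allocation. */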
10039
10040 static void
10041 unicosmk_gen_dsib (unsigned long *imaskP)
10042 {
10043 if (alpha_procedure_type == PT_STACK)
10044 {
10045 const char *ssib_name;
10046 rtx mem;
10047
10048 /* Allocate 64 bytes for the DSIB. */
10049
10050 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10051 GEN_INT (-64))));
10052 emit_insn (gen_blockage ());
10053
10054 /* Save the return address. */
10055
10056 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10057 set_mem_alias_set (mem, alpha_sr_alias_set);
10058 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10059 (*imaskP) &= ~(1UL << REG_RA);
10060
10061 /* Save the old frame pointer. */
10062
10063 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10064 set_mem_alias_set (mem, alpha_sr_alias_set);
10065 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10066 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10067
10068 emit_insn (gen_blockage ());
10069
10070 /* Store the SSIB pointer. */
10071
10072 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10073 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10074 set_mem_alias_set (mem, alpha_sr_alias_set);
10075
10076 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10077 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10078 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10079
10080 /* Save the CIW index. */
10081
10082 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10083 set_mem_alias_set (mem, alpha_sr_alias_set);
10084 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10085
10086 emit_insn (gen_blockage ());
10087
10088 /* Set the new frame pointer. */
10089
10090 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10091 stack_pointer_rtx, GEN_INT (64))));
10092
10093 }
10094 else
10095 {
10096 /* Increment the frame pointer register to indicate that we do not
10097 have a frame. */
10098
10099 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10100 hard_frame_pointer_rtx, const1_rtx)));
10101 }
10102 }
10103
10104 /* Output the static subroutine information block for the current
10105 function. */
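/* Roughly, the block emitted below looks like (as read from this code,
   not from CAM documentation):
       .psect  <prefix>__SSIB_<fnname>,data
       .quad   ^X20008<len>28      -- header word carrying the name length
       .quad   0                   -- saved-register info (not filled in yet)
       .quad   <fnname>            -- function address
       .quad   0
       .quad   0
       <function name as .byte values, padded to a quadword>
       <one .quad per call information word> */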
10106
10107 static void
10108 unicosmk_output_ssib (FILE *file, const char *fnname)
10109 {
10110 int len;
10111 int i;
10112 rtx x;
10113 rtx ciw;
10114 struct machine_function *machine = cfun->machine;
10115
10116 ssib_section ();
10117 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10118 unicosmk_ssib_name ());
10119
10120 /* A required header word that includes the function name length. */
10121
10122 len = strlen (fnname);
10123 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10124
10125 /* Saved registers
10126 ??? We don't do that yet. */
10127
10128 fputs ("\t.quad\t0\n", file);
10129
10130 /* Function address. */
10131
10132 fputs ("\t.quad\t", file);
10133 assemble_name (file, fnname);
10134 putc ('\n', file);
10135
10136 fputs ("\t.quad\t0\n", file);
10137 fputs ("\t.quad\t0\n", file);
10138
10139 /* Function name.
10140 ??? We do it the same way Cray CC does it but this could be
10141 simplified. */
10142
10143 for (i = 0; i < len; i++)
10144 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10145 if ((len % 8) == 0)
10146 fputs ("\t.quad\t0\n", file);
10147 else
10148 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10149
10150 /* All call information words used in the function. */
10151
10152 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10153 {
10154 ciw = XEXP (x, 0);
10155 #if HOST_BITS_PER_WIDE_INT == 32
10156 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10157 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10158 #else
10159 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10160 #endif
10161 }
10162 }
10163
10164 /* Add a call information word (CIW) to the list of the current function's
10165 CIWs and return its index.
10166
10167 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10168
10169 rtx
10170 unicosmk_add_call_info_word (rtx x)
10171 {
10172 rtx node;
10173 struct machine_function *machine = cfun->machine;
10174
10175 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10176 if (machine->first_ciw == NULL_RTX)
10177 machine->first_ciw = node;
10178 else
10179 XEXP (machine->last_ciw, 1) = node;
10180
10181 machine->last_ciw = node;
10182 ++machine->ciw_count;
10183
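  /* The value returned appears to be the quadword offset of this CIW from
     the start of the SSIB emitted by unicosmk_output_ssib: five fixed
     quadwords, the function name padded to a quadword boundary, then the
     CIWs in order.  This is inferred from that code, not from CAM docs. */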
10184 return GEN_INT (machine->ciw_count
10185 + strlen (current_function_name ())/8 + 5);
10186 }
10187
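/* Buffer shared by unicosmk_text_section and unicosmk_data_section below;
   each call overwrites the previous result, so the returned string must be
   used before the next call. */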
10188 static char unicosmk_section_buf[100];
10189
10190 char *
10191 unicosmk_text_section (void)
10192 {
10193 static int count = 0;
10194 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
10195 count++);
10196 return unicosmk_section_buf;
10197 }
10198
10199 char *
10200 unicosmk_data_section (void)
10201 {
10202 static int count = 1;
10203 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
10204 count++);
10205 return unicosmk_section_buf;
10206 }
10207
10208 /* The Cray assembler doesn't accept extern declarations for symbols which
10209 are defined in the same file. We have to keep track of all global
10210 symbols which are referenced and/or defined in a source file and, at
10211 the end of the file, output extern declarations for those which are
10212 referenced but not defined. */
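/* The list below is flushed by unicosmk_output_externs, which
   unicosmk_file_end calls at the end of the assembly file. */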
10213
10214 /* List of identifiers for which an extern declaration might have to be
10215 emitted. */
10216 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10217
10218 struct unicosmk_extern_list
10219 {
10220 struct unicosmk_extern_list *next;
10221 const char *name;
10222 };
10223
10224 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10225
10226 /* Output extern declarations which are required for every asm file. */
10227
10228 static void
10229 unicosmk_output_default_externs (FILE *file)
10230 {
10231 static const char *const externs[] =
10232 { "__T3E_MISMATCH" };
10233
10234 int i;
10235 int n;
10236
10237 n = ARRAY_SIZE (externs);
10238
10239 for (i = 0; i < n; i++)
10240 fprintf (file, "\t.extern\t%s\n", externs[i]);
10241 }
10242
10243 /* Output extern declarations for global symbols which have been
10244 referenced but not defined. */
10245
10246 static void
10247 unicosmk_output_externs (FILE *file)
10248 {
10249 struct unicosmk_extern_list *p;
10250 const char *real_name;
10251 int len;
10252 tree name_tree;
10253
10254 len = strlen (user_label_prefix);
10255 for (p = unicosmk_extern_head; p != 0; p = p->next)
10256 {
10257 /* We have to strip the encoding and possibly remove user_label_prefix
10258 from the identifier in order to handle -fleading-underscore and
10259 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10260 real_name = default_strip_name_encoding (p->name);
10261 if (len && p->name[0] == '*'
10262 && !memcmp (real_name, user_label_prefix, len))
10263 real_name += len;
10264
10265 name_tree = get_identifier (real_name);
10266 if (! TREE_ASM_WRITTEN (name_tree))
10267 {
10268 TREE_ASM_WRITTEN (name_tree) = 1;
10269 fputs ("\t.extern\t", file);
10270 assemble_name (file, p->name);
10271 putc ('\n', file);
10272 }
10273 }
10274 }
10275
10276 /* Record an extern. */
10277
10278 void
10279 unicosmk_add_extern (const char *name)
10280 {
10281 struct unicosmk_extern_list *p;
10282
10283 p = (struct unicosmk_extern_list *)
10284 xmalloc (sizeof (struct unicosmk_extern_list));
10285 p->next = unicosmk_extern_head;
10286 p->name = name;
10287 unicosmk_extern_head = p;
10288 }
10289
10290 /* The Cray assembler generates incorrect code if identifiers which
10291 conflict with register names are used as instruction operands. We have
10292 to replace such identifiers with DEX expressions. */
10293
10294 /* Structure to collect identifiers which have been replaced by DEX
10295 expressions. */
10296 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10297
10298 struct unicosmk_dex {
10299 struct unicosmk_dex *next;
10300 const char *name;
10301 };
10302
10303 /* List of identifiers which have been replaced by DEX expressions. The DEX
10304 number is determined by the position in the list. */
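/* Entries are pushed at the head, so the list is newest-first; a name's DEX
   number is its 1-based position counting from the tail, i.e. numbers are
   assigned in order of first use (see unicosmk_need_dex). */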
10305
10306 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10307
10308 /* The number of elements in the DEX list. */
10309
10310 static int unicosmk_dex_count = 0;
10311
10312 /* Check if NAME must be replaced by a DEX expression. */
10313
10314 static int
10315 unicosmk_special_name (const char *name)
10316 {
10317 if (name[0] == '*')
10318 ++name;
10319
10320 if (name[0] == '$')
10321 ++name;
10322
10323 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10324 return 0;
10325
10326 switch (name[1])
10327 {
10328 case '1': case '2':
10329 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10330
10331 case '3':
10332 return (name[2] == '\0'
10333 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10334
10335 default:
10336 return (ISDIGIT (name[1]) && name[2] == '\0');
10337 }
10338 }
10339
10340 /* Return the DEX number if X must be replaced by a DEX expression and 0
10341 otherwise. */
10342
10343 static int
10344 unicosmk_need_dex (rtx x)
10345 {
10346 struct unicosmk_dex *dex;
10347 const char *name;
10348 int i;
10349
10350 if (GET_CODE (x) != SYMBOL_REF)
10351 return 0;
10352
10353 name = XSTR (x, 0);
10354 if (! unicosmk_special_name (name))
10355 return 0;
10356
10357 i = unicosmk_dex_count;
10358 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10359 {
10360 if (! strcmp (name, dex->name))
10361 return i;
10362 --i;
10363 }
10364
10365 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10366 dex->name = name;
10367 dex->next = unicosmk_dex_list;
10368 unicosmk_dex_list = dex;
10369
10370 ++unicosmk_dex_count;
10371 return unicosmk_dex_count;
10372 }
10373
10374 /* Output the DEX definitions for this file. */
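/* Purely illustrative: if "f10" and then "r2" had been replaced, the block
   emitted below would read
       .dexstart
       DEX (2) = r2
       DEX (1) = f10
       .dexend  */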
10375
10376 static void
10377 unicosmk_output_dex (FILE *file)
10378 {
10379 struct unicosmk_dex *dex;
10380 int i;
10381
10382 if (unicosmk_dex_list == NULL)
10383 return;
10384
10385 fprintf (file, "\t.dexstart\n");
10386
10387 i = unicosmk_dex_count;
10388 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10389 {
10390 fprintf (file, "\tDEX (%d) = ", i);
10391 assemble_name (file, dex->name);
10392 putc ('\n', file);
10393 --i;
10394 }
10395
10396 fprintf (file, "\t.dexend\n");
10397 }
10398
10399 /* Output text to appear at the beginning of an assembler file. */
10400
10401 static void
10402 unicosmk_file_start (void)
10403 {
10404 int i;
10405
10406 fputs ("\t.ident\t", asm_out_file);
10407 unicosmk_output_module_name (asm_out_file);
10408 fputs ("\n\n", asm_out_file);
10409
10410 /* The Unicos/Mk assembler uses different register names. Instead of trying
10411 to support them, we simply use micro definitions. */
10412
10413 /* CAM has different register names: rN for the integer register N and fN
10414 for the floating-point register N. Instead of trying to use these in
10415 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10416 register. */
10417
10418 for (i = 0; i < 32; ++i)
10419 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10420
10421 for (i = 0; i < 32; ++i)
10422 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10423
10424 putc ('\n', asm_out_file);
10425
10426 /* The .align directive fills unused space with zeroes, which does not work
10427 in code sections. We define the macro 'gcc@code@align' which uses nops
10428 instead. Note that it assumes that code sections always have the
10429 biggest possible alignment since . refers to the current offset from
10430 the beginning of the section. */
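  /* Read directly off the directives emitted below: the macro computes the
     offset of `.' within a 2^n-byte granule and, if it is nonzero, emits
     (granule - offset) / 4 copies of "bis r31,r31,r31", the canonical
     Alpha nop. */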
10431
10432 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10433 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10434 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10435 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10436 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10437 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10438 fputs ("\t.endr\n", asm_out_file);
10439 fputs ("\t.endif\n", asm_out_file);
10440 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10441
10442 /* Output extern declarations which should always be visible. */
10443 unicosmk_output_default_externs (asm_out_file);
10444
10445 /* Open a dummy section. We always need to be inside a section for the
10446 section-switching code to work correctly.
10447 ??? This should be a module id or something like that. I still have to
10448 figure out what the rules for those are. */
10449 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10450 }
10451
10452 /* Output text to appear at the end of an assembler file. This includes all
10453 pending extern declarations and DEX expressions. */
10454
10455 static void
10456 unicosmk_file_end (void)
10457 {
10458 fputs ("\t.endp\n\n", asm_out_file);
10459
10460 /* Output all pending externs. */
10461
10462 unicosmk_output_externs (asm_out_file);
10463
10464 /* Output dex definitions used for functions whose names conflict with
10465 register names. */
10466
10467 unicosmk_output_dex (asm_out_file);
10468
10469 fputs ("\t.end\t", asm_out_file);
10470 unicosmk_output_module_name (asm_out_file);
10471 putc ('\n', asm_out_file);
10472 }
10473
10474 #else
10475
10476 static void
10477 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10478 {}
10479
10480 static void
10481 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10482 {}
10483
10484 static void
10485 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10486 const char * fnname ATTRIBUTE_UNUSED)
10487 {}
10488
10489 rtx
10490 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10491 {
10492 return NULL_RTX;
10493 }
10494
10495 static int
10496 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10497 {
10498 return 0;
10499 }
10500
10501 #endif /* TARGET_ABI_UNICOSMK */
10502
10503 static void
10504 alpha_init_libfuncs (void)
10505 {
10506 if (TARGET_ABI_UNICOSMK)
10507 {
10508 /* Prevent gcc from generating calls to __divsi3. */
10509 set_optab_libfunc (sdiv_optab, SImode, 0);
10510 set_optab_libfunc (udiv_optab, SImode, 0);
10511
10512 /* Use the functions provided by the system library
10513 for DImode integer division. */
10514 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10515 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10516 }
10517 else if (TARGET_ABI_OPEN_VMS)
10518 {
10519 /* Use the VMS runtime library functions for division and
10520 remainder. */
10521 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10522 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10523 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10524 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10525 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10526 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10527 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10528 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10529 }
10530 }
10531
10532 \f
10533 /* Initialize the GCC target structure. */
10534 #if TARGET_ABI_OPEN_VMS
10535 # undef TARGET_ATTRIBUTE_TABLE
10536 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10537 # undef TARGET_SECTION_TYPE_FLAGS
10538 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10539 #endif
10540
10541 #undef TARGET_IN_SMALL_DATA_P
10542 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10543
10544 #if TARGET_ABI_UNICOSMK
10545 # undef TARGET_INSERT_ATTRIBUTES
10546 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10547 # undef TARGET_SECTION_TYPE_FLAGS
10548 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10549 # undef TARGET_ASM_UNIQUE_SECTION
10550 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10551 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10552 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10553 # undef TARGET_ASM_GLOBALIZE_LABEL
10554 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10555 # undef TARGET_MUST_PASS_IN_STACK
10556 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10557 #endif
10558
10559 #undef TARGET_ASM_ALIGNED_HI_OP
10560 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10561 #undef TARGET_ASM_ALIGNED_DI_OP
10562 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10563
10564 /* Default unaligned ops are provided for ELF systems. To get unaligned
10565 data for non-ELF systems, we have to turn off auto alignment. */
10566 #ifndef OBJECT_FORMAT_ELF
10567 #undef TARGET_ASM_UNALIGNED_HI_OP
10568 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10569 #undef TARGET_ASM_UNALIGNED_SI_OP
10570 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10571 #undef TARGET_ASM_UNALIGNED_DI_OP
10572 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10573 #endif
10574
10575 #ifdef OBJECT_FORMAT_ELF
10576 #undef TARGET_ASM_SELECT_RTX_SECTION
10577 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10578 #endif
10579
10580 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10581 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10582
10583 #undef TARGET_INIT_LIBFUNCS
10584 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10585
10586 #if TARGET_ABI_UNICOSMK
10587 #undef TARGET_ASM_FILE_START
10588 #define TARGET_ASM_FILE_START unicosmk_file_start
10589 #undef TARGET_ASM_FILE_END
10590 #define TARGET_ASM_FILE_END unicosmk_file_end
10591 #else
10592 #undef TARGET_ASM_FILE_START
10593 #define TARGET_ASM_FILE_START alpha_file_start
10594 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10595 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10596 #endif
10597
10598 #undef TARGET_SCHED_ADJUST_COST
10599 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10600 #undef TARGET_SCHED_ISSUE_RATE
10601 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10602 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10603 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10604 alpha_multipass_dfa_lookahead
10605
10606 #undef TARGET_HAVE_TLS
10607 #define TARGET_HAVE_TLS HAVE_AS_TLS
10608
10609 #undef TARGET_INIT_BUILTINS
10610 #define TARGET_INIT_BUILTINS alpha_init_builtins
10611 #undef TARGET_EXPAND_BUILTIN
10612 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10613 #undef TARGET_FOLD_BUILTIN
10614 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10615
10616 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10617 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10618 #undef TARGET_CANNOT_COPY_INSN_P
10619 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10620 #undef TARGET_CANNOT_FORCE_CONST_MEM
10621 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10622
10623 #if TARGET_ABI_OSF
10624 #undef TARGET_ASM_OUTPUT_MI_THUNK
10625 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10626 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10627 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10628 #undef TARGET_STDARG_OPTIMIZE_HOOK
10629 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10630 #endif
10631
10632 #undef TARGET_RTX_COSTS
10633 #define TARGET_RTX_COSTS alpha_rtx_costs
10634 #undef TARGET_ADDRESS_COST
10635 #define TARGET_ADDRESS_COST hook_int_rtx_0
10636
10637 #undef TARGET_MACHINE_DEPENDENT_REORG
10638 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10639
10640 #undef TARGET_PROMOTE_FUNCTION_ARGS
10641 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10642 #undef TARGET_PROMOTE_FUNCTION_RETURN
10643 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10644 #undef TARGET_PROMOTE_PROTOTYPES
10645 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10646 #undef TARGET_RETURN_IN_MEMORY
10647 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10648 #undef TARGET_PASS_BY_REFERENCE
10649 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10650 #undef TARGET_SETUP_INCOMING_VARARGS
10651 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10652 #undef TARGET_STRICT_ARGUMENT_NAMING
10653 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10654 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10655 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10656 #undef TARGET_SPLIT_COMPLEX_ARG
10657 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10658 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10659 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10660 #undef TARGET_ARG_PARTIAL_BYTES
10661 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10662
10663 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10664 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10665 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10666 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10667
10668 #undef TARGET_BUILD_BUILTIN_VA_LIST
10669 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10670
10671 /* The Alpha architecture does not require sequential consistency. See
10672 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10673 for an example of how it can be violated in practice. */
10674 #undef TARGET_RELAXED_ORDERING
10675 #define TARGET_RELAXED_ORDERING true
10676
10677 #undef TARGET_DEFAULT_TARGET_FLAGS
10678 #define TARGET_DEFAULT_TARGET_FLAGS \
10679 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10680 #undef TARGET_HANDLE_OPTION
10681 #define TARGET_HANDLE_OPTION alpha_handle_option
10682
10683 struct gcc_target targetm = TARGET_INITIALIZER;
10684
10685 \f
10686 #include "gt-alpha.h"