alpha.c (code_for_builtin): Replace special-case builtin codes with ctzdi2, clzdi2...
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 59 Temple Place - Suite 330,
21 Boston, MA 02111-1307, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56
57 /* Specify which cpu to schedule for. */
58
59 enum processor_type alpha_cpu;
60 static const char * const alpha_cpu_name[] =
61 {
62 "ev4", "ev5", "ev6"
63 };
64
65 /* Specify how accurate floating-point traps need to be. */
66
67 enum alpha_trap_precision alpha_tp;
68
69 /* Specify the floating-point rounding mode. */
70
71 enum alpha_fp_rounding_mode alpha_fprm;
72
73 /* Specify which things cause traps. */
74
75 enum alpha_fp_trap_mode alpha_fptm;
76
77 /* Specify bit size of immediate TLS offsets. */
78
79 int alpha_tls_size = 32;
80
81 /* Strings decoded into the above options. */
82
83 const char *alpha_cpu_string; /* -mcpu= */
84 const char *alpha_tune_string; /* -mtune= */
85 const char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
86 const char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
87 const char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
88 const char *alpha_mlat_string; /* -mmemory-latency= */
89 const char *alpha_tls_size_string; /* -mtls-size=[16|32|64] */
90
91 /* Save information from a "cmpxx" operation until the branch or scc is
92 emitted. */
93
94 struct alpha_compare alpha_compare;
95
96 /* Nonzero if inside of a function, because the Alpha asm can't
97 handle .files inside of functions. */
98
99 static int inside_function = FALSE;
100
101 /* The number of cycles of latency we should assume on memory reads. */
102
103 int alpha_memory_latency = 3;
104
105 /* Whether the function needs the GP. */
106
107 static int alpha_function_needs_gp;
108
109 /* The alias set for prologue/epilogue register save/restore. */
110
111 static GTY(()) int alpha_sr_alias_set;
112
113 /* The assembler name of the current function. */
114
115 static const char *alpha_fnname;
116
117 /* The next explicit relocation sequence number. */
118 extern GTY(()) int alpha_next_sequence_number;
119 int alpha_next_sequence_number = 1;
120
121 /* The literal and gpdisp sequence numbers for this insn, as printed
122 by %# and %* respectively. */
123 extern GTY(()) int alpha_this_literal_sequence_number;
124 extern GTY(()) int alpha_this_gpdisp_sequence_number;
125 int alpha_this_literal_sequence_number;
126 int alpha_this_gpdisp_sequence_number;
127
128 /* Costs of various operations on the different architectures. */
129
130 struct alpha_rtx_cost_data
131 {
132 unsigned char fp_add;
133 unsigned char fp_mult;
134 unsigned char fp_div_sf;
135 unsigned char fp_div_df;
136 unsigned char int_mult_si;
137 unsigned char int_mult_di;
138 unsigned char int_shift;
139 unsigned char int_cmov;
140 unsigned short int_div;
141 };
142
143 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
144 {
145 { /* EV4 */
146 COSTS_N_INSNS (6), /* fp_add */
147 COSTS_N_INSNS (6), /* fp_mult */
148 COSTS_N_INSNS (34), /* fp_div_sf */
149 COSTS_N_INSNS (63), /* fp_div_df */
150 COSTS_N_INSNS (23), /* int_mult_si */
151 COSTS_N_INSNS (23), /* int_mult_di */
152 COSTS_N_INSNS (2), /* int_shift */
153 COSTS_N_INSNS (2), /* int_cmov */
154 COSTS_N_INSNS (97), /* int_div */
155 },
156 { /* EV5 */
157 COSTS_N_INSNS (4), /* fp_add */
158 COSTS_N_INSNS (4), /* fp_mult */
159 COSTS_N_INSNS (15), /* fp_div_sf */
160 COSTS_N_INSNS (22), /* fp_div_df */
161 COSTS_N_INSNS (8), /* int_mult_si */
162 COSTS_N_INSNS (12), /* int_mult_di */
163 COSTS_N_INSNS (1) + 1, /* int_shift */
164 COSTS_N_INSNS (1), /* int_cmov */
165 COSTS_N_INSNS (83), /* int_div */
166 },
167 { /* EV6 */
168 COSTS_N_INSNS (4), /* fp_add */
169 COSTS_N_INSNS (4), /* fp_mult */
170 COSTS_N_INSNS (12), /* fp_div_sf */
171 COSTS_N_INSNS (15), /* fp_div_df */
172 COSTS_N_INSNS (7), /* int_mult_si */
173 COSTS_N_INSNS (7), /* int_mult_di */
174 COSTS_N_INSNS (1), /* int_shift */
175 COSTS_N_INSNS (2), /* int_cmov */
176 COSTS_N_INSNS (86), /* int_div */
177 },
178 };
179
180 /* Similar but tuned for code size instead of execution latency. The
181 extra +N is fractional cost tuning based on latency. It's used to
182 encourage use of cheaper insns like shift, but only if there's just
183 one of them. */
184
185 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
186 {
187 COSTS_N_INSNS (1), /* fp_add */
188 COSTS_N_INSNS (1), /* fp_mult */
189 COSTS_N_INSNS (1), /* fp_div_sf */
190 COSTS_N_INSNS (1) + 1, /* fp_div_df */
191 COSTS_N_INSNS (1) + 1, /* int_mult_si */
192 COSTS_N_INSNS (1) + 2, /* int_mult_di */
193 COSTS_N_INSNS (1), /* int_shift */
194 COSTS_N_INSNS (1), /* int_cmov */
195 COSTS_N_INSNS (6), /* int_div */
196 };
197
198 /* Get the number of args of a function in one of two ways. */
199 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
200 #define NUM_ARGS current_function_args_info.num_args
201 #else
202 #define NUM_ARGS current_function_args_info
203 #endif
204
205 #define REG_PV 27
206 #define REG_RA 26
207
208 /* Declarations of static functions. */
209 static struct machine_function *alpha_init_machine_status (void);
210 static rtx alpha_emit_xfloating_compare (enum rtx_code, rtx, rtx);
211
212 #if TARGET_ABI_OPEN_VMS
213 static void alpha_write_linkage (FILE *, const char *, tree);
214 #endif
215
216 static void unicosmk_output_deferred_case_vectors (FILE *);
217 static void unicosmk_gen_dsib (unsigned long *);
218 static void unicosmk_output_ssib (FILE *, const char *);
219 static int unicosmk_need_dex (rtx);
220 \f
221 /* Parse target option strings. */
222
223 void
224 override_options (void)
225 {
226 int i;
227 static const struct cpu_table {
228 const char *const name;
229 const enum processor_type processor;
230 const int flags;
231 } cpu_table[] = {
232 #define EV5_MASK (MASK_CPU_EV5)
233 #define EV6_MASK (MASK_CPU_EV6|MASK_BWX|MASK_MAX|MASK_FIX)
234 { "ev4", PROCESSOR_EV4, 0 },
235 { "ev45", PROCESSOR_EV4, 0 },
236 { "21064", PROCESSOR_EV4, 0 },
237 { "ev5", PROCESSOR_EV5, EV5_MASK },
238 { "21164", PROCESSOR_EV5, EV5_MASK },
239 { "ev56", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
240 { "21164a", PROCESSOR_EV5, EV5_MASK|MASK_BWX },
241 { "pca56", PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
242 { "21164PC",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
243 { "21164pc",PROCESSOR_EV5, EV5_MASK|MASK_BWX|MASK_MAX },
244 { "ev6", PROCESSOR_EV6, EV6_MASK },
245 { "21264", PROCESSOR_EV6, EV6_MASK },
246 { "ev67", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
247 { "21264a", PROCESSOR_EV6, EV6_MASK|MASK_CIX },
248 { 0, 0, 0 }
249 };
250
251 /* Unicos/Mk doesn't have shared libraries. */
252 if (TARGET_ABI_UNICOSMK && flag_pic)
253 {
254 warning ("-f%s ignored for Unicos/Mk (not supported)",
255 (flag_pic > 1) ? "PIC" : "pic");
256 flag_pic = 0;
257 }
258
259 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
260 floating-point instructions. Make that the default for this target. */
261 if (TARGET_ABI_UNICOSMK)
262 alpha_fprm = ALPHA_FPRM_DYN;
263 else
264 alpha_fprm = ALPHA_FPRM_NORM;
265
266 alpha_tp = ALPHA_TP_PROG;
267 alpha_fptm = ALPHA_FPTM_N;
268
269 /* We cannot use su and sui qualifiers for conversion instructions on
270 Unicos/Mk. I'm not sure if this is due to assembler or hardware
271 limitations. Right now, we issue a warning if -mieee is specified
272 and then ignore it; eventually, we should either get it right or
273 disable the option altogether. */
274
275 if (TARGET_IEEE)
276 {
277 if (TARGET_ABI_UNICOSMK)
278 warning ("-mieee not supported on Unicos/Mk");
279 else
280 {
281 alpha_tp = ALPHA_TP_INSN;
282 alpha_fptm = ALPHA_FPTM_SU;
283 }
284 }
285
286 if (TARGET_IEEE_WITH_INEXACT)
287 {
288 if (TARGET_ABI_UNICOSMK)
289 warning ("-mieee-with-inexact not supported on Unicos/Mk");
290 else
291 {
292 alpha_tp = ALPHA_TP_INSN;
293 alpha_fptm = ALPHA_FPTM_SUI;
294 }
295 }
296
297 if (alpha_tp_string)
298 {
299 if (! strcmp (alpha_tp_string, "p"))
300 alpha_tp = ALPHA_TP_PROG;
301 else if (! strcmp (alpha_tp_string, "f"))
302 alpha_tp = ALPHA_TP_FUNC;
303 else if (! strcmp (alpha_tp_string, "i"))
304 alpha_tp = ALPHA_TP_INSN;
305 else
306 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
307 }
308
309 if (alpha_fprm_string)
310 {
311 if (! strcmp (alpha_fprm_string, "n"))
312 alpha_fprm = ALPHA_FPRM_NORM;
313 else if (! strcmp (alpha_fprm_string, "m"))
314 alpha_fprm = ALPHA_FPRM_MINF;
315 else if (! strcmp (alpha_fprm_string, "c"))
316 alpha_fprm = ALPHA_FPRM_CHOP;
317 else if (! strcmp (alpha_fprm_string,"d"))
318 alpha_fprm = ALPHA_FPRM_DYN;
319 else
320 error ("bad value %qs for -mfp-rounding-mode switch",
321 alpha_fprm_string);
322 }
323
324 if (alpha_fptm_string)
325 {
326 if (strcmp (alpha_fptm_string, "n") == 0)
327 alpha_fptm = ALPHA_FPTM_N;
328 else if (strcmp (alpha_fptm_string, "u") == 0)
329 alpha_fptm = ALPHA_FPTM_U;
330 else if (strcmp (alpha_fptm_string, "su") == 0)
331 alpha_fptm = ALPHA_FPTM_SU;
332 else if (strcmp (alpha_fptm_string, "sui") == 0)
333 alpha_fptm = ALPHA_FPTM_SUI;
334 else
335 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
336 }
337
338 if (alpha_tls_size_string)
339 {
340 if (strcmp (alpha_tls_size_string, "16") == 0)
341 alpha_tls_size = 16;
342 else if (strcmp (alpha_tls_size_string, "32") == 0)
343 alpha_tls_size = 32;
344 else if (strcmp (alpha_tls_size_string, "64") == 0)
345 alpha_tls_size = 64;
346 else
347 error ("bad value %qs for -mtls-size switch", alpha_tls_size_string);
348 }
349
350 alpha_cpu
351 = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
352 : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
353
354 if (alpha_cpu_string)
355 {
356 for (i = 0; cpu_table [i].name; i++)
357 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
358 {
359 alpha_cpu = cpu_table [i].processor;
360 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX
361 | MASK_CPU_EV5 | MASK_CPU_EV6);
362 target_flags |= cpu_table [i].flags;
363 break;
364 }
365 if (! cpu_table [i].name)
366 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
367 }
368
369 if (alpha_tune_string)
370 {
371 for (i = 0; cpu_table [i].name; i++)
372 if (! strcmp (alpha_tune_string, cpu_table [i].name))
373 {
374 alpha_cpu = cpu_table [i].processor;
375 break;
376 }
377 if (! cpu_table [i].name)
378 error ("bad value %qs for -mcpu switch", alpha_tune_string);
379 }
380
381 /* Do some sanity checks on the above options. */
382
383 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
384 {
385 warning ("trap mode not supported on Unicos/Mk");
386 alpha_fptm = ALPHA_FPTM_N;
387 }
388
389 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
390 && alpha_tp != ALPHA_TP_INSN && ! TARGET_CPU_EV6)
391 {
392 warning ("fp software completion requires -mtrap-precision=i");
393 alpha_tp = ALPHA_TP_INSN;
394 }
395
396 if (TARGET_CPU_EV6)
397 {
398 /* Except for EV6 pass 1 (not released), we always have precise
399 arithmetic traps, which means we can do software completion
400 without minding trap shadows. */
401 alpha_tp = ALPHA_TP_PROG;
402 }
403
404 if (TARGET_FLOAT_VAX)
405 {
406 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
407 {
408 warning ("rounding mode not supported for VAX floats");
409 alpha_fprm = ALPHA_FPRM_NORM;
410 }
411 if (alpha_fptm == ALPHA_FPTM_SUI)
412 {
413 warning ("trap mode not supported for VAX floats");
414 alpha_fptm = ALPHA_FPTM_SU;
415 }
416 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
417 warning ("128-bit long double not supported for VAX floats");
418 target_flags &= ~MASK_LONG_DOUBLE_128;
419 }
420
421 {
422 char *end;
423 int lat;
424
425 if (!alpha_mlat_string)
426 alpha_mlat_string = "L1";
427
428 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
429 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
430 ;
431 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
432 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
433 && alpha_mlat_string[2] == '\0')
434 {
435 static int const cache_latency[][4] =
436 {
437 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
438 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
439 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
440 };
441
442 lat = alpha_mlat_string[1] - '0';
443 if (lat <= 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
444 {
445 warning ("L%d cache latency unknown for %s",
446 lat, alpha_cpu_name[alpha_cpu]);
447 lat = 3;
448 }
449 else
450 lat = cache_latency[alpha_cpu][lat-1];
451 }
452 else if (! strcmp (alpha_mlat_string, "main"))
453 {
454 /* Most current memories have about 370ns latency. This is
455 a reasonable guess for a fast cpu. */
456 lat = 150;
457 }
458 else
459 {
460 warning ("bad value %qs for -mmemory-latency", alpha_mlat_string);
461 lat = 3;
462 }
463
464 alpha_memory_latency = lat;
465 }
466
467 /* Default the definition of "small data" to 8 bytes. */
468 if (!g_switch_set)
469 g_switch_value = 8;
470
471 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
472 if (flag_pic == 1)
473 target_flags |= MASK_SMALL_DATA;
474 else if (flag_pic == 2)
475 target_flags &= ~MASK_SMALL_DATA;
476
477 /* Align labels and loops for optimal branching. */
478 /* ??? Kludge these by not doing anything if we don't optimize and also if
479 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
480 if (optimize > 0 && write_symbols != SDB_DEBUG)
481 {
482 if (align_loops <= 0)
483 align_loops = 16;
484 if (align_jumps <= 0)
485 align_jumps = 16;
486 }
487 if (align_functions <= 0)
488 align_functions = 16;
489
490 /* Acquire a unique set number for our register saves and restores. */
491 alpha_sr_alias_set = new_alias_set ();
492
493 /* Register variables and functions with the garbage collector. */
494
495 /* Set up function hooks. */
496 init_machine_status = alpha_init_machine_status;
497
498 /* Tell the compiler when we're using VAX floating point. */
499 if (TARGET_FLOAT_VAX)
500 {
501 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
502 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
503 REAL_MODE_FORMAT (TFmode) = NULL;
504 }
505 }
506 \f
507 /* Returns 1 if VALUE is a mask that contains full bytes of zeros or ones. */
508
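/* For example, assuming a 64-bit HOST_WIDE_INT, 0xffffffff00000000 and
   0x00ff00ff00ff00ff are acceptable masks, but 0x0000000000000001 is not,
   since its low byte is neither 0x00 nor 0xff. */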
509 int
510 zap_mask (HOST_WIDE_INT value)
511 {
512 int i;
513
514 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
515 i++, value >>= 8)
516 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
517 return 0;
518
519 return 1;
520 }
521
522 /* Return true if OP is valid for a particular TLS relocation.
523 We are already guaranteed that OP is a CONST. */
524
525 int
526 tls_symbolic_operand_1 (rtx op, int size, int unspec)
527 {
528 op = XEXP (op, 0);
529
530 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
531 return 0;
532 op = XVECEXP (op, 0, 0);
533
534 if (GET_CODE (op) != SYMBOL_REF)
535 return 0;
536
537 if (SYMBOL_REF_LOCAL_P (op))
538 {
539 if (alpha_tls_size > size)
540 return 0;
541 }
542 else
543 {
544 if (size != 64)
545 return 0;
546 }
547
548 switch (SYMBOL_REF_TLS_MODEL (op))
549 {
550 case TLS_MODEL_LOCAL_DYNAMIC:
551 return unspec == UNSPEC_DTPREL;
552 case TLS_MODEL_INITIAL_EXEC:
553 return unspec == UNSPEC_TPREL && size == 64;
554 case TLS_MODEL_LOCAL_EXEC:
555 return unspec == UNSPEC_TPREL;
556 default:
557 abort ();
558 }
559 }
560
561 /* Used by aligned_memory_operand and unaligned_memory_operand to
562 resolve what reload is going to do with OP if it's a register. */
563
564 rtx
565 resolve_reload_operand (rtx op)
566 {
567 if (reload_in_progress)
568 {
569 rtx tmp = op;
570 if (GET_CODE (tmp) == SUBREG)
571 tmp = SUBREG_REG (tmp);
572 if (GET_CODE (tmp) == REG
573 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
574 {
575 op = reg_equiv_memory_loc[REGNO (tmp)];
576 if (op == 0)
577 return 0;
578 }
579 }
580 return op;
581 }
582
583 /* Implements CONST_OK_FOR_LETTER_P. Return true if the value matches
584 the range defined for C in [I-P]. */
585
586 bool
587 alpha_const_ok_for_letter_p (HOST_WIDE_INT value, int c)
588 {
589 switch (c)
590 {
591 case 'I':
592 /* An unsigned 8 bit constant. */
593 return (unsigned HOST_WIDE_INT) value < 0x100;
594 case 'J':
595 /* The constant zero. */
596 return value == 0;
597 case 'K':
598 /* A signed 16 bit constant. */
599 return (unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000;
600 case 'L':
601 /* A shifted signed 16 bit constant appropriate for LDAH. */
602 return ((value & 0xffff) == 0
603 && ((value) >> 31 == -1 || value >> 31 == 0));
604 case 'M':
605 /* A constant whose AND with a register can be done with a ZAP insn. */
606 return zap_mask (value);
607 case 'N':
608 /* A complemented unsigned 8 bit constant. */
609 return (unsigned HOST_WIDE_INT) (~ value) < 0x100;
610 case 'O':
611 /* A negated unsigned 8 bit constant. */
612 return (unsigned HOST_WIDE_INT) (- value) < 0x100;
613 case 'P':
614 /* The constant 1, 2 or 3. */
615 return value == 1 || value == 2 || value == 3;
616
617 default:
618 return false;
619 }
620 }
621
622 /* Implements CONST_DOUBLE_OK_FOR_LETTER_P. Return true if VALUE
623 matches for C in [GH]. */
624
625 bool
626 alpha_const_double_ok_for_letter_p (rtx value, int c)
627 {
628 switch (c)
629 {
630 case 'G':
631 /* The floating point zero constant. */
632 return (GET_MODE_CLASS (GET_MODE (value)) == MODE_FLOAT
633 && value == CONST0_RTX (GET_MODE (value)));
634
635 case 'H':
636 /* A valid operand of a ZAP insn. */
637 return (GET_MODE (value) == VOIDmode
638 && zap_mask (CONST_DOUBLE_LOW (value))
639 && zap_mask (CONST_DOUBLE_HIGH (value)));
640
641 default:
642 return false;
643 }
644 }
645
646 /* Implements EXTRA_CONSTRAINT. Return true if VALUE
647 matches for C in [QRSTUW]. */
648
649 bool
650 alpha_extra_constraint (rtx value, int c)
651 {
652 switch (c)
653 {
654 case 'Q':
655 return normal_memory_operand (value, VOIDmode);
656 case 'R':
657 return direct_call_operand (value, Pmode);
658 case 'S':
659 return (GET_CODE (value) == CONST_INT
660 && (unsigned HOST_WIDE_INT) INTVAL (value) < 64);
661 case 'T':
662 return GET_CODE (value) == HIGH;
663 case 'U':
664 return TARGET_ABI_UNICOSMK && symbolic_operand (value, VOIDmode);
665 case 'W':
666 return (GET_CODE (value) == CONST_VECTOR
667 && value == CONST0_RTX (GET_MODE (value)));
668 default:
669 return false;
670 }
671 }
672
673 /* The scalar modes supported differ from the default check-what-c-supports
674 version in that sometimes TFmode is available even when long double
675 indicates only DFmode. On unicosmk, we have the situation that HImode
676 doesn't map to any C type, but of course we still support that. */
677
678 static bool
679 alpha_scalar_mode_supported_p (enum machine_mode mode)
680 {
681 switch (mode)
682 {
683 case QImode:
684 case HImode:
685 case SImode:
686 case DImode:
687 case TImode: /* via optabs.c */
688 return true;
689
690 case SFmode:
691 case DFmode:
692 return true;
693
694 case TFmode:
695 return TARGET_HAS_XFLOATING_LIBS;
696
697 default:
698 return false;
699 }
700 }
701
702 /* Alpha implements a couple of integer vector mode operations when
703 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
704 which allows the vectorizer to operate on e.g. move instructions,
705 and allows expand_vector_operations to do something useful. */
706
707 static bool
708 alpha_vector_mode_supported_p (enum machine_mode mode)
709 {
710 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
711 }
712
713 /* Return 1 if this function can directly return via $26. */
714
715 int
716 direct_return (void)
717 {
718 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
719 && reload_completed
720 && alpha_sa_size () == 0
721 && get_frame_size () == 0
722 && current_function_outgoing_args_size == 0
723 && current_function_pretend_args_size == 0);
724 }
725
726 /* Return the ADDR_VEC associated with a tablejump insn. */
727
728 rtx
729 alpha_tablejump_addr_vec (rtx insn)
730 {
731 rtx tmp;
732
733 tmp = JUMP_LABEL (insn);
734 if (!tmp)
735 return NULL_RTX;
736 tmp = NEXT_INSN (tmp);
737 if (!tmp)
738 return NULL_RTX;
739 if (GET_CODE (tmp) == JUMP_INSN
740 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
741 return PATTERN (tmp);
742 return NULL_RTX;
743 }
744
745 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
746
747 rtx
748 alpha_tablejump_best_label (rtx insn)
749 {
750 rtx jump_table = alpha_tablejump_addr_vec (insn);
751 rtx best_label = NULL_RTX;
752
753 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
754 there for edge frequency counts from profile data. */
755
756 if (jump_table)
757 {
758 int n_labels = XVECLEN (jump_table, 1);
759 int best_count = -1;
760 int i, j;
761
762 for (i = 0; i < n_labels; i++)
763 {
764 int count = 1;
765
766 for (j = i + 1; j < n_labels; j++)
767 if (XEXP (XVECEXP (jump_table, 1, i), 0)
768 == XEXP (XVECEXP (jump_table, 1, j), 0))
769 count++;
770
771 if (count > best_count)
772 best_count = count, best_label = XVECEXP (jump_table, 1, i);
773 }
774 }
775
776 return best_label ? best_label : const0_rtx;
777 }
778
779 /* Return the TLS model to use for SYMBOL. */
780
781 static enum tls_model
782 tls_symbolic_operand_type (rtx symbol)
783 {
784 enum tls_model model;
785
786 if (GET_CODE (symbol) != SYMBOL_REF)
787 return 0;
788 model = SYMBOL_REF_TLS_MODEL (symbol);
789
790 /* Local-exec with a 64-bit size is the same code as initial-exec. */
791 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
792 model = TLS_MODEL_INITIAL_EXEC;
793
794 return model;
795 }
796 \f
797 /* Return true if the function DECL will share the same GP as any
798 function in the current unit of translation. */
799
800 static bool
801 decl_has_samegp (tree decl)
802 {
803 /* Functions that are not local can be overridden, and thus may
804 not share the same gp. */
805 if (!(*targetm.binds_local_p) (decl))
806 return false;
807
808 /* If -msmall-data is in effect, assume that there is only one GP
809 for the module, and so any local symbol has this property. We
810 need explicit relocations to be able to enforce this for symbols
811 not defined in this unit of translation, however. */
812 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
813 return true;
814
815 /* Functions that are not external are defined in this UoT. */
816 /* ??? Irritatingly, static functions not yet emitted are still
817 marked "external". Apply this to non-static functions only. */
818 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
819 }
820
821 /* Return true if EXP should be placed in the small data section. */
822
823 static bool
824 alpha_in_small_data_p (tree exp)
825 {
826 /* We want to merge strings, so we never consider them small data. */
827 if (TREE_CODE (exp) == STRING_CST)
828 return false;
829
830 /* Functions are never in the small data area. Duh. */
831 if (TREE_CODE (exp) == FUNCTION_DECL)
832 return false;
833
834 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
835 {
836 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
837 if (strcmp (section, ".sdata") == 0
838 || strcmp (section, ".sbss") == 0)
839 return true;
840 }
841 else
842 {
843 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
844
845 /* If this is an incomplete type with size 0, then we can't put it
846 in sdata because it might be too big when completed. */
847 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
848 return true;
849 }
850
851 return false;
852 }
853
854 #if TARGET_ABI_OPEN_VMS
855 static bool
856 alpha_linkage_symbol_p (const char *symname)
857 {
858 int symlen = strlen (symname);
859
860 if (symlen > 4)
861 return strcmp (&symname [symlen - 4], "..lk") == 0;
862
863 return false;
864 }
865
866 #define LINKAGE_SYMBOL_REF_P(X) \
867 ((GET_CODE (X) == SYMBOL_REF \
868 && alpha_linkage_symbol_p (XSTR (X, 0))) \
869 || (GET_CODE (X) == CONST \
870 && GET_CODE (XEXP (X, 0)) == PLUS \
871 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
872 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
873 #endif
874
875 /* legitimate_address_p recognizes an RTL expression that is a valid
876 memory address for an instruction. The MODE argument is the
877 machine mode for the MEM expression that wants to use this address.
878
879 For Alpha, we have either a constant address or the sum of a
880 register and a constant address, or just a register. For DImode,
881 any of those forms can be surrounded with an AND that clears the
882 low-order three bits; this is an "unaligned" access. */
883
884 bool
885 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
886 {
887 /* If this is an ldq_u type address, discard the outer AND. */
888 if (mode == DImode
889 && GET_CODE (x) == AND
890 && GET_CODE (XEXP (x, 1)) == CONST_INT
891 && INTVAL (XEXP (x, 1)) == -8)
892 x = XEXP (x, 0);
893
894 /* Discard non-paradoxical subregs. */
895 if (GET_CODE (x) == SUBREG
896 && (GET_MODE_SIZE (GET_MODE (x))
897 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
898 x = SUBREG_REG (x);
899
900 /* Unadorned general registers are valid. */
901 if (REG_P (x)
902 && (strict
903 ? STRICT_REG_OK_FOR_BASE_P (x)
904 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
905 return true;
906
907 /* Constant addresses (i.e. +/- 32k) are valid. */
908 if (CONSTANT_ADDRESS_P (x))
909 return true;
910
911 #if TARGET_ABI_OPEN_VMS
912 if (LINKAGE_SYMBOL_REF_P (x))
913 return true;
914 #endif
915
916 /* Register plus a small constant offset is valid. */
917 if (GET_CODE (x) == PLUS)
918 {
919 rtx ofs = XEXP (x, 1);
920 x = XEXP (x, 0);
921
922 /* Discard non-paradoxical subregs. */
923 if (GET_CODE (x) == SUBREG
924 && (GET_MODE_SIZE (GET_MODE (x))
925 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
926 x = SUBREG_REG (x);
927
928 if (REG_P (x))
929 {
930 if (! strict
931 && NONSTRICT_REG_OK_FP_BASE_P (x)
932 && GET_CODE (ofs) == CONST_INT)
933 return true;
934 if ((strict
935 ? STRICT_REG_OK_FOR_BASE_P (x)
936 : NONSTRICT_REG_OK_FOR_BASE_P (x))
937 && CONSTANT_ADDRESS_P (ofs))
938 return true;
939 }
940 }
941
942 /* If we're managing explicit relocations, LO_SUM is valid, as
943 are small data symbols. */
944 else if (TARGET_EXPLICIT_RELOCS)
945 {
946 if (small_symbolic_operand (x, Pmode))
947 return true;
948
949 if (GET_CODE (x) == LO_SUM)
950 {
951 rtx ofs = XEXP (x, 1);
952 x = XEXP (x, 0);
953
954 /* Discard non-paradoxical subregs. */
955 if (GET_CODE (x) == SUBREG
956 && (GET_MODE_SIZE (GET_MODE (x))
957 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
958 x = SUBREG_REG (x);
959
960 /* Must have a valid base register. */
961 if (! (REG_P (x)
962 && (strict
963 ? STRICT_REG_OK_FOR_BASE_P (x)
964 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
965 return false;
966
967 /* The symbol must be local. */
968 if (local_symbolic_operand (ofs, Pmode)
969 || dtp32_symbolic_operand (ofs, Pmode)
970 || tp32_symbolic_operand (ofs, Pmode))
971 return true;
972 }
973 }
974
975 return false;
976 }
977
978 /* Build the SYMBOL_REF for __tls_get_addr. */
979
980 static GTY(()) rtx tls_get_addr_libfunc;
981
982 static rtx
983 get_tls_get_addr (void)
984 {
985 if (!tls_get_addr_libfunc)
986 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
987 return tls_get_addr_libfunc;
988 }
989
990 /* Try machine-dependent ways of modifying an illegitimate address
991 to be legitimate. If we find one, return the new, valid address. */
992
993 rtx
994 alpha_legitimize_address (rtx x, rtx scratch,
995 enum machine_mode mode ATTRIBUTE_UNUSED)
996 {
997 HOST_WIDE_INT addend;
998
999 /* If the address is (plus reg const_int) and the CONST_INT is not a
1000 valid offset, compute the high part of the constant and add it to
1001 the register. Then our address is (plus temp low-part-const). */
1002 if (GET_CODE (x) == PLUS
1003 && GET_CODE (XEXP (x, 0)) == REG
1004 && GET_CODE (XEXP (x, 1)) == CONST_INT
1005 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
1006 {
1007 addend = INTVAL (XEXP (x, 1));
1008 x = XEXP (x, 0);
1009 goto split_addend;
1010 }
1011
1012 /* If the address is (const (plus FOO const_int)), find the low-order
1013 part of the CONST_INT. Then load FOO plus any high-order part of the
1014 CONST_INT into a register. Our address is (plus reg low-part-const).
1015 This is done to reduce the number of GOT entries. */
1016 if (!no_new_pseudos
1017 && GET_CODE (x) == CONST
1018 && GET_CODE (XEXP (x, 0)) == PLUS
1019 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
1020 {
1021 addend = INTVAL (XEXP (XEXP (x, 0), 1));
1022 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
1023 goto split_addend;
1024 }
1025
1026 /* If we have a (plus reg const), emit the load as in (2), then add
1027 the two registers, and finally generate (plus reg low-part-const) as
1028 our address. */
1029 if (!no_new_pseudos
1030 && GET_CODE (x) == PLUS
1031 && GET_CODE (XEXP (x, 0)) == REG
1032 && GET_CODE (XEXP (x, 1)) == CONST
1033 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
1034 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
1035 {
1036 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
1037 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
1038 XEXP (XEXP (XEXP (x, 1), 0), 0),
1039 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1040 goto split_addend;
1041 }
1042
1043 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
1044 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
1045 {
1046 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
1047
1048 switch (tls_symbolic_operand_type (x))
1049 {
1050 case TLS_MODEL_GLOBAL_DYNAMIC:
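/* Global-dynamic: load the symbol's tlsgd argument into $16, call
   __tls_get_addr, and copy the result back from $0; emit_libcall_block
   records X as the equivalent value of that copy. */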
1051 start_sequence ();
1052
1053 r0 = gen_rtx_REG (Pmode, 0);
1054 r16 = gen_rtx_REG (Pmode, 16);
1055 tga = get_tls_get_addr ();
1056 dest = gen_reg_rtx (Pmode);
1057 seq = GEN_INT (alpha_next_sequence_number++);
1058
1059 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1060 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1061 insn = emit_call_insn (insn);
1062 CONST_OR_PURE_CALL_P (insn) = 1;
1063 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1064
1065 insn = get_insns ();
1066 end_sequence ();
1067
1068 emit_libcall_block (insn, dest, r0, x);
1069 return dest;
1070
1071 case TLS_MODEL_LOCAL_DYNAMIC:
1072 start_sequence ();
1073
1074 r0 = gen_rtx_REG (Pmode, 0);
1075 r16 = gen_rtx_REG (Pmode, 16);
1076 tga = get_tls_get_addr ();
1077 scratch = gen_reg_rtx (Pmode);
1078 seq = GEN_INT (alpha_next_sequence_number++);
1079
1080 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1081 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1082 insn = emit_call_insn (insn);
1083 CONST_OR_PURE_CALL_P (insn) = 1;
1084 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1085
1086 insn = get_insns ();
1087 end_sequence ();
1088
1089 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1090 UNSPEC_TLSLDM_CALL);
1091 emit_libcall_block (insn, scratch, r0, eqv);
1092
1093 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1094 eqv = gen_rtx_CONST (Pmode, eqv);
1095
1096 if (alpha_tls_size == 64)
1097 {
1098 dest = gen_reg_rtx (Pmode);
1099 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1100 emit_insn (gen_adddi3 (dest, dest, scratch));
1101 return dest;
1102 }
1103 if (alpha_tls_size == 32)
1104 {
1105 insn = gen_rtx_HIGH (Pmode, eqv);
1106 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1107 scratch = gen_reg_rtx (Pmode);
1108 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1109 }
1110 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1111
1112 case TLS_MODEL_INITIAL_EXEC:
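/* Initial-exec: read the thread pointer, load the symbol's TP-relative
   offset into a scratch register, and add the two. */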
1113 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1114 eqv = gen_rtx_CONST (Pmode, eqv);
1115 tp = gen_reg_rtx (Pmode);
1116 scratch = gen_reg_rtx (Pmode);
1117 dest = gen_reg_rtx (Pmode);
1118
1119 emit_insn (gen_load_tp (tp));
1120 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1121 emit_insn (gen_adddi3 (dest, tp, scratch));
1122 return dest;
1123
1124 case TLS_MODEL_LOCAL_EXEC:
1125 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1126 eqv = gen_rtx_CONST (Pmode, eqv);
1127 tp = gen_reg_rtx (Pmode);
1128
1129 emit_insn (gen_load_tp (tp));
1130 if (alpha_tls_size == 32)
1131 {
1132 insn = gen_rtx_HIGH (Pmode, eqv);
1133 insn = gen_rtx_PLUS (Pmode, tp, insn);
1134 tp = gen_reg_rtx (Pmode);
1135 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1136 }
1137 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1138 }
1139
1140 if (local_symbolic_operand (x, Pmode))
1141 {
1142 if (small_symbolic_operand (x, Pmode))
1143 return x;
1144 else
1145 {
1146 if (!no_new_pseudos)
1147 scratch = gen_reg_rtx (Pmode);
1148 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1149 gen_rtx_HIGH (Pmode, x)));
1150 return gen_rtx_LO_SUM (Pmode, scratch, x);
1151 }
1152 }
1153 }
1154
1155 return NULL;
1156
1157 split_addend:
1158 {
1159 HOST_WIDE_INT low, high;
1160
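/* Split ADDEND into a sign-extended 16-bit low part (folded into the
   final address below), a sign-extended 32-bit high part, and any
   remainder; the latter two are added into the base register first. */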
1161 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1162 addend -= low;
1163 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1164 addend -= high;
1165
1166 if (addend)
1167 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1168 (no_new_pseudos ? scratch : NULL_RTX),
1169 1, OPTAB_LIB_WIDEN);
1170 if (high)
1171 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1172 (no_new_pseudos ? scratch : NULL_RTX),
1173 1, OPTAB_LIB_WIDEN);
1174
1175 return plus_constant (x, low);
1176 }
1177 }
1178
1179 /* Primarily this is required for TLS symbols, but given that our move
1180 patterns *ought* to be able to handle any symbol at any time, we
1181 should never be spilling symbolic operands to the constant pool, ever. */
1182
1183 static bool
1184 alpha_cannot_force_const_mem (rtx x)
1185 {
1186 enum rtx_code code = GET_CODE (x);
1187 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1188 }
1189
1190 /* We do not allow indirect calls to be optimized into sibling calls, nor
1191 can we allow a call to a function with a different GP to be optimized
1192 into a sibcall. */
1193
1194 static bool
1195 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1196 {
1197 /* Can't do indirect tail calls, since we don't know if the target
1198 uses the same GP. */
1199 if (!decl)
1200 return false;
1201
1202 /* Otherwise, we can make a tail call if the target function shares
1203 the same GP. */
1204 return decl_has_samegp (decl);
1205 }
1206
1207 int
1208 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1209 {
1210 rtx x = *px;
1211
1212 /* Don't re-split. */
1213 if (GET_CODE (x) == LO_SUM)
1214 return -1;
1215
1216 return small_symbolic_operand (x, Pmode) != 0;
1217 }
1218
1219 static int
1220 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1221 {
1222 rtx x = *px;
1223
1224 /* Don't re-split. */
1225 if (GET_CODE (x) == LO_SUM)
1226 return -1;
1227
1228 if (small_symbolic_operand (x, Pmode))
1229 {
1230 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1231 *px = x;
1232 return -1;
1233 }
1234
1235 return 0;
1236 }
1237
1238 rtx
1239 split_small_symbolic_operand (rtx x)
1240 {
1241 x = copy_insn (x);
1242 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1243 return x;
1244 }
1245
1246 /* Indicate that INSN cannot be duplicated. This is true for any insn
1247 that we've marked with gpdisp relocs, since those have to stay in
1248 1-1 correspondence with one another.
1249
1250 Technically we could copy them if we could set up a mapping from one
1251 sequence number to another, across the set of insns to be duplicated.
1252 This seems overly complicated and error-prone since interblock motion
1253 from sched-ebb could move one of the pair of insns to a different block.
1254
1255 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1256 then they'll be in a different block from their ldgp, which could lead
1257 the bb reorder code to think that it would be ok to copy just the block
1258 containing the call and branch to the block containing the ldgp. */
1259
1260 static bool
1261 alpha_cannot_copy_insn_p (rtx insn)
1262 {
1263 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1264 return false;
1265 if (recog_memoized (insn) >= 0)
1266 return get_attr_cannot_copy (insn);
1267 else
1268 return false;
1269 }
1270
1271
1272 /* Try a machine-dependent way of reloading an illegitimate address
1273 operand. If we find one, push the reload and return the new rtx. */
1274
1275 rtx
1276 alpha_legitimize_reload_address (rtx x,
1277 enum machine_mode mode ATTRIBUTE_UNUSED,
1278 int opnum, int type,
1279 int ind_levels ATTRIBUTE_UNUSED)
1280 {
1281 /* We must recognize output that we have already generated ourselves. */
1282 if (GET_CODE (x) == PLUS
1283 && GET_CODE (XEXP (x, 0)) == PLUS
1284 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1285 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1286 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1287 {
1288 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1289 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1290 opnum, type);
1291 return x;
1292 }
1293
1294 /* We wish to handle large displacements off a base register by
1295 splitting the addend across an ldah and the mem insn. This
1296 cuts the number of extra insns needed from 3 to 1. */
1297 if (GET_CODE (x) == PLUS
1298 && GET_CODE (XEXP (x, 0)) == REG
1299 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1300 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1301 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1302 {
1303 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1304 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1305 HOST_WIDE_INT high
1306 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
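/* LOW is the sign-extended low 16 bits of VAL; HIGH is the remainder
   rounded to a sign-extended multiple of 0x10000, so HIGH + LOW == VAL
   whenever VAL fits in 32 bits. */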
1307
1308 /* Check for 32-bit overflow. */
1309 if (high + low != val)
1310 return NULL_RTX;
1311
1312 /* Reload the high part into a base reg; leave the low part
1313 in the mem directly. */
1314 x = gen_rtx_PLUS (GET_MODE (x),
1315 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1316 GEN_INT (high)),
1317 GEN_INT (low));
1318
1319 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1320 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1321 opnum, type);
1322 return x;
1323 }
1324
1325 return NULL_RTX;
1326 }
1327 \f
1328 /* Compute a (partial) cost for rtx X. Return true if the complete
1329 cost has been computed, and false if subexpressions should be
1330 scanned. In either case, *TOTAL contains the cost result. */
1331
1332 static bool
1333 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1334 {
1335 enum machine_mode mode = GET_MODE (x);
1336 bool float_mode_p = FLOAT_MODE_P (mode);
1337 const struct alpha_rtx_cost_data *cost_data;
1338
1339 if (optimize_size)
1340 cost_data = &alpha_rtx_cost_size;
1341 else
1342 cost_data = &alpha_rtx_cost_data[alpha_cpu];
1343
1344 switch (code)
1345 {
1346 case CONST_INT:
1347 /* If this is an 8-bit constant, return zero since it can be used
1348 nearly anywhere with no cost. If it is a valid operand for an
1349 ADD or AND, likewise return 0 if we know it will be used in that
1350 context. Otherwise, return 2 since it might be used there later.
1351 All other constants take at least two insns. */
1352 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1353 {
1354 *total = 0;
1355 return true;
1356 }
1357 /* FALLTHRU */
1358
1359 case CONST_DOUBLE:
1360 if (x == CONST0_RTX (mode))
1361 *total = 0;
1362 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1363 || (outer_code == AND && and_operand (x, VOIDmode)))
1364 *total = 0;
1365 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1366 *total = 2;
1367 else
1368 *total = COSTS_N_INSNS (2);
1369 return true;
1370
1371 case CONST:
1372 case SYMBOL_REF:
1373 case LABEL_REF:
1374 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1375 *total = COSTS_N_INSNS (outer_code != MEM);
1376 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1377 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1378 else if (tls_symbolic_operand_type (x))
1379 /* Estimate of cost for call_pal rduniq. */
1380 /* ??? How many insns do we emit here? More than one... */
1381 *total = COSTS_N_INSNS (15);
1382 else
1383 /* Otherwise we do a load from the GOT. */
1384 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1385 return true;
1386
1387 case HIGH:
1388 /* This is effectively an add_operand. */
1389 *total = 2;
1390 return true;
1391
1392 case PLUS:
1393 case MINUS:
1394 if (float_mode_p)
1395 *total = cost_data->fp_add;
1396 else if (GET_CODE (XEXP (x, 0)) == MULT
1397 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1398 {
1399 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1400 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1401 return true;
1402 }
1403 return false;
1404
1405 case MULT:
1406 if (float_mode_p)
1407 *total = cost_data->fp_mult;
1408 else if (mode == DImode)
1409 *total = cost_data->int_mult_di;
1410 else
1411 *total = cost_data->int_mult_si;
1412 return false;
1413
1414 case ASHIFT:
1415 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1416 && INTVAL (XEXP (x, 1)) <= 3)
1417 {
1418 *total = COSTS_N_INSNS (1);
1419 return false;
1420 }
1421 /* FALLTHRU */
1422
1423 case ASHIFTRT:
1424 case LSHIFTRT:
1425 *total = cost_data->int_shift;
1426 return false;
1427
1428 case IF_THEN_ELSE:
1429 if (float_mode_p)
1430 *total = cost_data->fp_add;
1431 else
1432 *total = cost_data->int_cmov;
1433 return false;
1434
1435 case DIV:
1436 case UDIV:
1437 case MOD:
1438 case UMOD:
1439 if (!float_mode_p)
1440 *total = cost_data->int_div;
1441 else if (mode == SFmode)
1442 *total = cost_data->fp_div_sf;
1443 else
1444 *total = cost_data->fp_div_df;
1445 return false;
1446
1447 case MEM:
1448 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1449 return true;
1450
1451 case NEG:
1452 if (! float_mode_p)
1453 {
1454 *total = COSTS_N_INSNS (1);
1455 return false;
1456 }
1457 /* FALLTHRU */
1458
1459 case ABS:
1460 if (! float_mode_p)
1461 {
1462 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1463 return false;
1464 }
1465 /* FALLTHRU */
1466
1467 case FLOAT:
1468 case UNSIGNED_FLOAT:
1469 case FIX:
1470 case UNSIGNED_FIX:
1471 case FLOAT_EXTEND:
1472 case FLOAT_TRUNCATE:
1473 *total = cost_data->fp_add;
1474 return false;
1475
1476 default:
1477 return false;
1478 }
1479 }
1480 \f
1481 /* REF is an alignable memory location. Place an aligned SImode
1482 reference into *PALIGNED_MEM and the number of bits to shift into
1483 *PBITNUM. SCRATCH is a free register for use in reloading out
1484 of range stack slots. */
1485
1486 void
1487 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1488 {
1489 rtx base;
1490 HOST_WIDE_INT offset = 0;
1491
1492 if (GET_CODE (ref) != MEM)
1493 abort ();
1494
1495 if (reload_in_progress
1496 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1497 {
1498 base = find_replacement (&XEXP (ref, 0));
1499
1500 if (! memory_address_p (GET_MODE (ref), base))
1501 abort ();
1502 }
1503 else
1504 {
1505 base = XEXP (ref, 0);
1506 }
1507
1508 if (GET_CODE (base) == PLUS)
1509 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1510
1511 *paligned_mem
1512 = widen_memory_access (ref, SImode, (offset & ~3) - offset);
1513
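/* The bit number is the position of the referenced bytes within that
   aligned SImode word: eight times the byte offset on a little-endian
   target, mirrored for big-endian. */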
1514 if (WORDS_BIG_ENDIAN)
1515 *pbitnum = GEN_INT (32 - (GET_MODE_BITSIZE (GET_MODE (ref))
1516 + (offset & 3) * 8));
1517 else
1518 *pbitnum = GEN_INT ((offset & 3) * 8);
1519 }
1520
1521 /* Similar, but just get the address. Handle the two reload cases.
1522 Add EXTRA_OFFSET to the address we return. */
1523
1524 rtx
1525 get_unaligned_address (rtx ref, int extra_offset)
1526 {
1527 rtx base;
1528 HOST_WIDE_INT offset = 0;
1529
1530 if (GET_CODE (ref) != MEM)
1531 abort ();
1532
1533 if (reload_in_progress
1534 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1535 {
1536 base = find_replacement (&XEXP (ref, 0));
1537
1538 if (! memory_address_p (GET_MODE (ref), base))
1539 abort ();
1540 }
1541 else
1542 {
1543 base = XEXP (ref, 0);
1544 }
1545
1546 if (GET_CODE (base) == PLUS)
1547 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1548
1549 return plus_constant (base, offset + extra_offset);
1550 }
1551
1552 /* On the Alpha, all (non-symbolic) constants except zero go into
1553 a floating-point register via memory. Note that we cannot
1554 return anything that is not a subset of CLASS, and that some
1555 symbolic constants cannot be dropped to memory. */
1556
1557 enum reg_class
1558 alpha_preferred_reload_class(rtx x, enum reg_class class)
1559 {
1560 /* Zero is present in any register class. */
1561 if (x == CONST0_RTX (GET_MODE (x)))
1562 return class;
1563
1564 /* These sorts of constants we can easily drop to memory. */
1565 if (GET_CODE (x) == CONST_INT
1566 || GET_CODE (x) == CONST_DOUBLE
1567 || GET_CODE (x) == CONST_VECTOR)
1568 {
1569 if (class == FLOAT_REGS)
1570 return NO_REGS;
1571 if (class == ALL_REGS)
1572 return GENERAL_REGS;
1573 return class;
1574 }
1575
1576 /* All other kinds of constants should not (and in the case of HIGH
1577 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1578 secondary reload. */
1579 if (CONSTANT_P (x))
1580 return (class == ALL_REGS ? GENERAL_REGS : class);
1581
1582 return class;
1583 }
1584
1585 /* Loading and storing HImode or QImode values to and from memory
1586 usually requires a scratch register. The exceptions are loading
1587 QImode and HImode from an aligned address to a general register,
1588 or any access at all when byte instructions are permitted.
1589
1590 We also cannot load an unaligned address or a paradoxical SUBREG
1591 into an FP register.
1592
1593 We also cannot do integral arithmetic into FP regs, as might result
1594 from register elimination into a DImode fp register. */
1595
1596 enum reg_class
1597 secondary_reload_class (enum reg_class class, enum machine_mode mode,
1598 rtx x, int in)
1599 {
1600 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1601 {
1602 if (GET_CODE (x) == MEM
1603 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1604 || (GET_CODE (x) == SUBREG
1605 && (GET_CODE (SUBREG_REG (x)) == MEM
1606 || (GET_CODE (SUBREG_REG (x)) == REG
1607 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1608 {
1609 if (!in || !aligned_memory_operand(x, mode))
1610 return GENERAL_REGS;
1611 }
1612 }
1613
1614 if (class == FLOAT_REGS)
1615 {
1616 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1617 return GENERAL_REGS;
1618
1619 if (GET_CODE (x) == SUBREG
1620 && (GET_MODE_SIZE (GET_MODE (x))
1621 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1622 return GENERAL_REGS;
1623
1624 if (in && INTEGRAL_MODE_P (mode)
1625 && ! (memory_operand (x, mode) || x == const0_rtx))
1626 return GENERAL_REGS;
1627 }
1628
1629 return NO_REGS;
1630 }
1631 \f
1632 /* Subfunction of the following function. Update the flags of any MEM
1633 found in part of X. */
1634
1635 static int
1636 alpha_set_memflags_1 (rtx *xp, void *data)
1637 {
1638 rtx x = *xp, orig = (rtx) data;
1639
1640 if (GET_CODE (x) != MEM)
1641 return 0;
1642
1643 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1644 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1645 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1646 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1647 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1648
1649 /* Sadly, we cannot use alias sets because the extra aliasing
1650 produced by the AND interferes. Given that two-byte quantities
1651 are the only thing we would be able to differentiate anyway,
1652 there does not seem to be any point in convoluting the early
1653 out of the alias check. */
1654
1655 return -1;
1656 }
1657
1658 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1659 generated to perform a memory operation, look for any MEMs in either
1660 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1661 volatile flags from REF into each of the MEMs found. If REF is not
1662 a MEM, don't do anything. */
1663
1664 void
1665 alpha_set_memflags (rtx insn, rtx ref)
1666 {
1667 rtx *base_ptr;
1668
1669 if (GET_CODE (ref) != MEM)
1670 return;
1671
1672 /* This is only called from alpha.md, after having had something
1673 generated from one of the insn patterns. So if everything is
1674 zero, the pattern is already up-to-date. */
1675 if (!MEM_VOLATILE_P (ref)
1676 && !MEM_IN_STRUCT_P (ref)
1677 && !MEM_SCALAR_P (ref)
1678 && !MEM_NOTRAP_P (ref)
1679 && !MEM_READONLY_P (ref))
1680 return;
1681
1682 if (INSN_P (insn))
1683 base_ptr = &PATTERN (insn);
1684 else
1685 base_ptr = &insn;
1686 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1687 }
1688 \f
1689 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1690 int, bool);
1691
1692 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1693 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1694 and return pc_rtx if successful. */
1695
1696 static rtx
1697 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1698 HOST_WIDE_INT c, int n, bool no_output)
1699 {
1700 HOST_WIDE_INT new;
1701 int i, bits;
1702 /* Use a pseudo if highly optimizing and still generating RTL. */
1703 rtx subtarget
1704 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1705 rtx temp, insn;
1706
1707 /* If this is a sign-extended 32-bit constant, we can do this in at most
1708 three insns, so do it if we have enough insns left. We always have
1709 a sign-extended 32-bit constant when compiling on a narrow machine. */
1710
1711 if (HOST_BITS_PER_WIDE_INT != 64
1712 || c >> 31 == -1 || c >> 31 == 0)
1713 {
1714 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1715 HOST_WIDE_INT tmp1 = c - low;
1716 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1717 HOST_WIDE_INT extra = 0;
1718
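/* At this point C == (HIGH << 16) + LOW, with both halves sign-extended
   16-bit values, except when HIGH would come out negative for a
   nonnegative C; the EXTRA adjustment below handles that case. */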
1719 /* If HIGH will be interpreted as negative but the constant is
1720 positive, we must adjust it to do two ldha insns. */
1721
1722 if ((high & 0x8000) != 0 && c >= 0)
1723 {
1724 extra = 0x4000;
1725 tmp1 -= 0x40000000;
1726 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1727 }
1728
1729 if (c == low || (low == 0 && extra == 0))
1730 {
1731 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1732 but that meant that we can't handle INT_MIN on 32-bit machines
1733 (like NT/Alpha), because we recurse indefinitely through
1734 emit_move_insn to gen_movdi. So instead, since we know exactly
1735 what we want, create it explicitly. */
1736
1737 if (no_output)
1738 return pc_rtx;
1739 if (target == NULL)
1740 target = gen_reg_rtx (mode);
1741 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1742 return target;
1743 }
1744 else if (n >= 2 + (extra != 0))
1745 {
1746 if (no_output)
1747 return pc_rtx;
1748 if (no_new_pseudos)
1749 {
1750 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1751 temp = target;
1752 }
1753 else
1754 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1755 subtarget, mode);
1756
1757 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1758 This means that if we go through expand_binop, we'll try to
1759 generate extensions, etc, which will require new pseudos, which
1760 will fail during some split phases. The SImode add patterns
1761 still exist, but are not named. So build the insns by hand. */
1762
1763 if (extra != 0)
1764 {
1765 if (! subtarget)
1766 subtarget = gen_reg_rtx (mode);
1767 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1768 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1769 emit_insn (insn);
1770 temp = subtarget;
1771 }
1772
1773 if (target == NULL)
1774 target = gen_reg_rtx (mode);
1775 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1776 insn = gen_rtx_SET (VOIDmode, target, insn);
1777 emit_insn (insn);
1778 return target;
1779 }
1780 }
1781
1782 /* If we couldn't do it that way, try some other methods. But if we have
1783 no instructions left, don't bother. Likewise, if this is SImode and
1784 we can't make pseudos, we can't do anything since the expand_binop
1785 and expand_unop calls will widen and try to make pseudos. */
1786
1787 if (n == 1 || (mode == SImode && no_new_pseudos))
1788 return 0;
1789
1790 /* Next, see if we can load a related constant and then shift and possibly
1791 negate it to get the constant we want. Try this once with each
1792 increasing number of insns.
1793
1794 for (i = 1; i < n; i++)
1795 {
1796 /* First, see if minus some low bits, we've an easy load of
1797 high bits. */
1798
1799 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1800 if (new != 0)
1801 {
1802 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1803 if (temp)
1804 {
1805 if (no_output)
1806 return temp;
1807 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1808 target, 0, OPTAB_WIDEN);
1809 }
1810 }
1811
1812 /* Next try complementing. */
1813 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1814 if (temp)
1815 {
1816 if (no_output)
1817 return temp;
1818 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1819 }
1820
1821 /* Next try to form a constant and do a left shift. We can do this
1822 if some low-order bits are zero; the exact_log2 call below tells
1823 us that information. The bits we are shifting out could be any
1824 value, but here we'll just try the 0- and sign-extended forms of
1825 the constant. To try to increase the chance of having the same
1826 constant in more than one insn, start at the highest number of
1827 bits to shift, but try all possibilities in case a ZAPNOT will
1828 be useful. */
1829
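/* C & -C isolates the lowest set bit of C, so exact_log2 of it gives
   the number of trailing zero bits, i.e. the largest shift we can undo. */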
1830 bits = exact_log2 (c & -c);
1831 if (bits > 0)
1832 for (; bits > 0; bits--)
1833 {
1834 new = c >> bits;
1835 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1836 if (!temp && c < 0)
1837 {
1838 new = (unsigned HOST_WIDE_INT)c >> bits;
1839 temp = alpha_emit_set_const (subtarget, mode, new,
1840 i, no_output);
1841 }
1842 if (temp)
1843 {
1844 if (no_output)
1845 return temp;
1846 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1847 target, 0, OPTAB_WIDEN);
1848 }
1849 }
1850
1851 /* Now try high-order zero bits. Here we try the shifted-in bits as
1852 all zero and all ones. Be careful to avoid shifting outside the
1853 mode and to avoid shifting outside the host wide int size. */
1854 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1855 confuse the recursive call and set all of the high 32 bits. */
1856
1857 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1858 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1859 if (bits > 0)
1860 for (; bits > 0; bits--)
1861 {
1862 new = c << bits;
1863 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1864 if (!temp)
1865 {
1866 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1867 temp = alpha_emit_set_const (subtarget, mode, new,
1868 i, no_output);
1869 }
1870 if (temp)
1871 {
1872 if (no_output)
1873 return temp;
1874 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1875 target, 1, OPTAB_WIDEN);
1876 }
1877 }
1878
1879 /* Now try high-order 1 bits. We get that with a sign-extension.
1880 But one bit isn't enough here. Be careful to avoid shifting outside
1881 the mode and to avoid shifting outside the host wide int size. */
1882
1883 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1884 - floor_log2 (~ c) - 2);
1885 if (bits > 0)
1886 for (; bits > 0; bits--)
1887 {
1888 new = c << bits;
1889 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1890 if (!temp)
1891 {
1892 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1893 temp = alpha_emit_set_const (subtarget, mode, new,
1894 i, no_output);
1895 }
1896 if (temp)
1897 {
1898 if (no_output)
1899 return temp;
1900 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1901 target, 0, OPTAB_WIDEN);
1902 }
1903 }
1904 }
1905
1906 #if HOST_BITS_PER_WIDE_INT == 64
1907 /* Finally, see if we can load a value into the target that is the same as the
1908 constant except that all bytes that are 0 are changed to be 0xff. If we
1909 can, then we can do a ZAPNOT to obtain the desired constant. */
1910
1911 new = c;
1912 for (i = 0; i < 64; i += 8)
1913 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1914 new |= (HOST_WIDE_INT) 0xff << i;
1915
1916 /* We are only called for SImode and DImode. If this is SImode, ensure that
1917 we are sign extended to a full word. */
1918
1919 if (mode == SImode)
1920 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1921
1922 if (new != c)
1923 {
1924 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1925 if (temp)
1926 {
1927 if (no_output)
1928 return temp;
1929 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1930 target, 0, OPTAB_WIDEN);
1931 }
1932 }
1933 #endif
1934
1935 return 0;
1936 }
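/* For illustration, a few hand-worked decompositions of the kind the search
   above can find (the constants here are hypothetical examples, not taken
   from the code above):

     0x12345678          ldah t,0x1234($31) ; lda t,0x5678(t)
                         (the "minus some low bits" case: subtracting the
                         sign-extended low 16 bits leaves 0x12340000)

     0x000003e800000000  lda t,125($31) ; sll t,35,t
                         (the left-shift case: the low 35 bits are zero and
                         c >> 35 == 125 is a one-insn constant)

     0x00ff00ff00ff00ff  lda t,-1($31) ; zapnot t,0x55,t
                         (the ZAPNOT case: turning the zero bytes into 0xff
                         gives -1, and the AND mask is a byte mask)  */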
1937
1938 /* Try to output insns to set TARGET equal to the constant C if it can be
1939 done in at most N insns. Do all computations in MODE. Returns the place
1940 where the output has been placed if it can be done and the insns have been
1941 emitted. If it would take more than N insns, zero is returned and no
1942 insns are emitted. */
1943
1944 static rtx
1945 alpha_emit_set_const (rtx target, enum machine_mode mode,
1946 HOST_WIDE_INT c, int n, bool no_output)
1947 {
1948 enum machine_mode orig_mode = mode;
1949 rtx orig_target = target;
1950 rtx result = 0;
1951 int i;
1952
1953 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1954 can't load this constant in one insn, do this in DImode. */
1955 if (no_new_pseudos && mode == SImode
1956 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1957 {
1958 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1959 if (result)
1960 return result;
1961
1962 target = no_output ? NULL : gen_lowpart (DImode, target);
1963 mode = DImode;
1964 }
1965 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1966 {
1967 target = no_output ? NULL : gen_lowpart (DImode, target);
1968 mode = DImode;
1969 }
1970
1971 /* Try 1 insn, then 2, then up to N. */
1972 for (i = 1; i <= n; i++)
1973 {
1974 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1975 if (result)
1976 {
1977 rtx insn, set;
1978
1979 if (no_output)
1980 return result;
1981
1982 insn = get_last_insn ();
1983 set = single_set (insn);
1984 if (! CONSTANT_P (SET_SRC (set)))
1985 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1986 break;
1987 }
1988 }
1989
1990 /* Allow for the case where we changed the mode of TARGET. */
1991 if (result)
1992 {
1993 if (result == target)
1994 result = orig_target;
1995 else if (mode != orig_mode)
1996 result = gen_lowpart (orig_mode, result);
1997 }
1998
1999 return result;
2000 }
2001
2002 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
2003 fall back to a straightforward decomposition. We do this to avoid
2004 exponential run times encountered when looking for longer sequences
2005 with alpha_emit_set_const. */
2006
2007 static rtx
2008 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
2009 {
2010 HOST_WIDE_INT d1, d2, d3, d4;
2011
2012 /* Decompose the entire word */
2013 #if HOST_BITS_PER_WIDE_INT >= 64
2014 if (c2 != -(c1 < 0))
2015 abort ();
2016 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2017 c1 -= d1;
2018 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2019 c1 = (c1 - d2) >> 32;
2020 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2021 c1 -= d3;
2022 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2023 if (c1 != d4)
2024 abort ();
2025 #else
2026 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2027 c1 -= d1;
2028 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2029 if (c1 != d2)
2030 abort ();
2031 c2 += (d2 < 0);
2032 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2033 c2 -= d3;
2034 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2035 if (c2 != d4)
2036 abort ();
2037 #endif
2038
2039 /* Construct the high word */
2040 if (d4)
2041 {
2042 emit_move_insn (target, GEN_INT (d4));
2043 if (d3)
2044 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2045 }
2046 else
2047 emit_move_insn (target, GEN_INT (d3));
2048
2049 /* Shift it into place */
2050 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2051
2052 /* Add in the low bits. */
2053 if (d2)
2054 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2055 if (d1)
2056 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2057
2058 return target;
2059 }
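/* A hand-worked instance of the decomposition above, for a hypothetical
   constant c = 0x1234567890abcdef:

     d1 = -0x3211        (sign-extended low 16 bits, 0xcdef)
     d2 = -0x6f540000    (sign-extended low 32 bits once d1 is removed)
     d3 =  0x5679
     d4 =  0x12340000

   so the emitted sequence comes out roughly as

     ldah t,0x1234($31)      (loads d4)
     lda  t,0x5679(t)        (adds d3)
     sll  t,32,t
     ldah t,-0x6f54(t)       (adds d2)
     lda  t,-0x3211(t)       (adds d1)

   and ((d4 + d3) << 32) + d2 + d1 == c.  */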
2060
2061 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2062 the low 64 bits in *P0 and *P1. */
2063
2064 static void
2065 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2066 {
2067 HOST_WIDE_INT i0, i1;
2068
2069 if (GET_CODE (x) == CONST_VECTOR)
2070 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2071
2072
2073 if (GET_CODE (x) == CONST_INT)
2074 {
2075 i0 = INTVAL (x);
2076 i1 = -(i0 < 0);
2077 }
2078 else if (HOST_BITS_PER_WIDE_INT >= 64)
2079 {
2080 i0 = CONST_DOUBLE_LOW (x);
2081 i1 = -(i0 < 0);
2082 }
2083 else
2084 {
2085 i0 = CONST_DOUBLE_LOW (x);
2086 i1 = CONST_DOUBLE_HIGH (x);
2087 }
2088
2089 *p0 = i0;
2090 *p1 = i1;
2091 }
2092
2093 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2094 are willing to load the value into a register via a move pattern.
2095 Normally this is all symbolic constants, integral constants that
2096 take three or fewer instructions, and floating-point zero. */
2097
2098 bool
2099 alpha_legitimate_constant_p (rtx x)
2100 {
2101 enum machine_mode mode = GET_MODE (x);
2102 HOST_WIDE_INT i0, i1;
2103
2104 switch (GET_CODE (x))
2105 {
2106 case CONST:
2107 case LABEL_REF:
2108 case SYMBOL_REF:
2109 case HIGH:
2110 return true;
2111
2112 case CONST_DOUBLE:
2113 if (x == CONST0_RTX (mode))
2114 return true;
2115 if (FLOAT_MODE_P (mode))
2116 return false;
2117 goto do_integer;
2118
2119 case CONST_VECTOR:
2120 if (x == CONST0_RTX (mode))
2121 return true;
2122 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2123 return false;
2124 if (GET_MODE_SIZE (mode) != 8)
2125 return false;
2126 goto do_integer;
2127
2128 case CONST_INT:
2129 do_integer:
2130 if (TARGET_BUILD_CONSTANTS)
2131 return true;
2132 alpha_extract_integer (x, &i0, &i1);
2133 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2134 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2135 return false;
2136
2137 default:
2138 return false;
2139 }
2140 }
2141
2142 /* Operand 1 is known to be a constant, and should require more than one
2143 instruction to load. Emit that multi-part load. */
2144
2145 bool
2146 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2147 {
2148 HOST_WIDE_INT i0, i1;
2149 rtx temp = NULL_RTX;
2150
2151 alpha_extract_integer (operands[1], &i0, &i1);
2152
2153 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2154 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2155
2156 if (!temp && TARGET_BUILD_CONSTANTS)
2157 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2158
2159 if (temp)
2160 {
2161 if (!rtx_equal_p (operands[0], temp))
2162 emit_move_insn (operands[0], temp);
2163 return true;
2164 }
2165
2166 return false;
2167 }
2168
2169 /* Expand a move instruction; return true if all work is done.
2170 We don't handle non-bwx subword loads here. */
2171
2172 bool
2173 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2174 {
2175 /* If the output is not a register, the input must be. */
2176 if (GET_CODE (operands[0]) == MEM
2177 && ! reg_or_0_operand (operands[1], mode))
2178 operands[1] = force_reg (mode, operands[1]);
2179
2180 /* Allow legitimize_address to perform some simplifications. */
2181 if (mode == Pmode && symbolic_operand (operands[1], mode))
2182 {
2183 rtx tmp;
2184
2185 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2186 if (tmp)
2187 {
2188 if (tmp == operands[0])
2189 return true;
2190 operands[1] = tmp;
2191 return false;
2192 }
2193 }
2194
2195 /* Early out for non-constants and valid constants. */
2196 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2197 return false;
2198
2199 /* Split large integers. */
2200 if (GET_CODE (operands[1]) == CONST_INT
2201 || GET_CODE (operands[1]) == CONST_DOUBLE
2202 || GET_CODE (operands[1]) == CONST_VECTOR)
2203 {
2204 if (alpha_split_const_mov (mode, operands))
2205 return true;
2206 }
2207
2208 /* Otherwise we've nothing left but to drop the thing to memory. */
2209 operands[1] = force_const_mem (mode, operands[1]);
2210 if (reload_in_progress)
2211 {
2212 emit_move_insn (operands[0], XEXP (operands[1], 0));
2213 operands[1] = copy_rtx (operands[1]);
2214 XEXP (operands[1], 0) = operands[0];
2215 }
2216 else
2217 operands[1] = validize_mem (operands[1]);
2218 return false;
2219 }
2220
2221 /* Expand a non-bwx QImode or HImode move instruction;
2222 return true if all work is done. */
2223
2224 bool
2225 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2226 {
2227 /* If the output is not a register, the input must be. */
2228 if (GET_CODE (operands[0]) == MEM)
2229 operands[1] = force_reg (mode, operands[1]);
2230
2231 /* Handle four memory cases, unaligned and aligned for either the input
2232 or the output. The only case where we can be called during reload is
2233 for aligned loads; all other cases require temporaries. */
2234
2235 if (GET_CODE (operands[1]) == MEM
2236 || (GET_CODE (operands[1]) == SUBREG
2237 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2238 || (reload_in_progress && GET_CODE (operands[1]) == REG
2239 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2240 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2241 && GET_CODE (SUBREG_REG (operands[1])) == REG
2242 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2243 {
2244 if (aligned_memory_operand (operands[1], mode))
2245 {
2246 if (reload_in_progress)
2247 {
2248 emit_insn ((mode == QImode
2249 ? gen_reload_inqi_help
2250 : gen_reload_inhi_help)
2251 (operands[0], operands[1],
2252 gen_rtx_REG (SImode, REGNO (operands[0]))));
2253 }
2254 else
2255 {
2256 rtx aligned_mem, bitnum;
2257 rtx scratch = gen_reg_rtx (SImode);
2258 rtx subtarget;
2259 bool copyout;
2260
2261 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2262
2263 subtarget = operands[0];
2264 if (GET_CODE (subtarget) == REG)
2265 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2266 else
2267 subtarget = gen_reg_rtx (DImode), copyout = true;
2268
2269 emit_insn ((mode == QImode
2270 ? gen_aligned_loadqi
2271 : gen_aligned_loadhi)
2272 (subtarget, aligned_mem, bitnum, scratch));
2273
2274 if (copyout)
2275 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2276 }
2277 }
2278 else
2279 {
2280 /* Don't pass these as parameters since that makes the generated
2281 code depend on parameter evaluation order which will cause
2282 bootstrap failures. */
2283
2284 rtx temp1, temp2, seq, subtarget;
2285 bool copyout;
2286
2287 temp1 = gen_reg_rtx (DImode);
2288 temp2 = gen_reg_rtx (DImode);
2289
2290 subtarget = operands[0];
2291 if (GET_CODE (subtarget) == REG)
2292 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2293 else
2294 subtarget = gen_reg_rtx (DImode), copyout = true;
2295
2296 seq = ((mode == QImode
2297 ? gen_unaligned_loadqi
2298 : gen_unaligned_loadhi)
2299 (subtarget, get_unaligned_address (operands[1], 0),
2300 temp1, temp2));
2301 alpha_set_memflags (seq, operands[1]);
2302 emit_insn (seq);
2303
2304 if (copyout)
2305 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2306 }
2307 return true;
2308 }
2309
2310 if (GET_CODE (operands[0]) == MEM
2311 || (GET_CODE (operands[0]) == SUBREG
2312 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2313 || (reload_in_progress && GET_CODE (operands[0]) == REG
2314 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2315 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2316 && GET_CODE (SUBREG_REG (operands[0])) == REG
2317 && REGNO (SUBREG_REG (operands[0])) >= FIRST_PSEUDO_REGISTER))
2318 {
2319 if (aligned_memory_operand (operands[0], mode))
2320 {
2321 rtx aligned_mem, bitnum;
2322 rtx temp1 = gen_reg_rtx (SImode);
2323 rtx temp2 = gen_reg_rtx (SImode);
2324
2325 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2326
2327 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2328 temp1, temp2));
2329 }
2330 else
2331 {
2332 rtx temp1 = gen_reg_rtx (DImode);
2333 rtx temp2 = gen_reg_rtx (DImode);
2334 rtx temp3 = gen_reg_rtx (DImode);
2335 rtx seq = ((mode == QImode
2336 ? gen_unaligned_storeqi
2337 : gen_unaligned_storehi)
2338 (get_unaligned_address (operands[0], 0),
2339 operands[1], temp1, temp2, temp3));
2340
2341 alpha_set_memflags (seq, operands[0]);
2342 emit_insn (seq);
2343 }
2344 return true;
2345 }
2346
2347 return false;
2348 }
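/* Without BWX, the aligned-load path above comes out as, roughly,
   "ldl t,mem ; extbl t,ofs,dest" for QImode (extwl for HImode), while the
   unaligned paths use the ldq_u/ext*l/ext*h sequences described before
   alpha_expand_unaligned_load below.  Register names here are illustrative
   only.  */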
2349
2350 /* Implement the movmisalign patterns. One of the operands is a memory
2351 that is not naturally aligned. Emit instructions to load or store it. */
2352
2353 void
2354 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2355 {
2356 /* Honor misaligned loads; these are what we promised to handle. */
2357 if (MEM_P (operands[1]))
2358 {
2359 rtx tmp;
2360
2361 if (register_operand (operands[0], mode))
2362 tmp = operands[0];
2363 else
2364 tmp = gen_reg_rtx (mode);
2365
2366 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2367 if (tmp != operands[0])
2368 emit_move_insn (operands[0], tmp);
2369 }
2370 else if (MEM_P (operands[0]))
2371 {
2372 if (!reg_or_0_operand (operands[1], mode))
2373 operands[1] = force_reg (mode, operands[1]);
2374 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2375 }
2376 else
2377 gcc_unreachable ();
2378 }
2379
2380 /* Generate an unsigned DImode to FP conversion. This is the same code
2381 optabs would emit if we didn't have TFmode patterns.
2382
2383 For SFmode, this is the only construction I've found that can pass
2384 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2385 intermediates will work, because you'll get intermediate rounding
2386 that ruins the end result. Some of this could be fixed by turning
2387 on round-to-positive-infinity, but that requires diddling the fpsr,
2388 which kills performance. I tried turning this around and converting
2389 to a negative number, so that I could turn on /m, but either I did
2390 it wrong or there's something else, because I wound up with the exact
2391 same single-bit error. There is a branch-less form of this same code:
2392
2393 srl $16,1,$1
2394 and $16,1,$2
2395 cmplt $16,0,$3
2396 or $1,$2,$2
2397 cmovge $16,$16,$2
2398 itoft $3,$f10
2399 itoft $2,$f11
2400 cvtqs $f11,$f11
2401 adds $f11,$f11,$f0
2402 fcmoveq $f10,$f11,$f0
2403
2404 I'm not using it because it's the same number of instructions as
2405 this branch-full form, and it has more serialized long latency
2406 instructions on the critical path.
2407
2408 For DFmode, we can avoid rounding errors by breaking up the word
2409 into two pieces, converting them separately, and adding them back:
2410
2411 LC0: .long 0,0x5f800000
2412
2413 itoft $16,$f11
2414 lda $2,LC0
2415 cmplt $16,0,$1
2416 cpyse $f11,$f31,$f10
2417 cpyse $f31,$f11,$f11
2418 s4addq $1,$2,$1
2419 lds $f12,0($1)
2420 cvtqt $f10,$f10
2421 cvtqt $f11,$f11
2422 addt $f12,$f10,$f0
2423 addt $f0,$f11,$f0
2424
2425 This doesn't seem to be a clear-cut win over the optabs form.
2426 It probably all depends on the distribution of numbers being
2427 converted -- in the optabs form, all but high-bit-set values have a
2428 much lower minimum execution time. */
2429
2430 void
2431 alpha_emit_floatuns (rtx operands[2])
2432 {
2433 rtx neglab, donelab, i0, i1, f0, in, out;
2434 enum machine_mode mode;
2435
2436 out = operands[0];
2437 in = force_reg (DImode, operands[1]);
2438 mode = GET_MODE (out);
2439 neglab = gen_label_rtx ();
2440 donelab = gen_label_rtx ();
2441 i0 = gen_reg_rtx (DImode);
2442 i1 = gen_reg_rtx (DImode);
2443 f0 = gen_reg_rtx (mode);
2444
2445 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2446
2447 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2448 emit_jump_insn (gen_jump (donelab));
2449 emit_barrier ();
2450
2451 emit_label (neglab);
2452
2453 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2454 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2455 emit_insn (gen_iordi3 (i0, i0, i1));
2456 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2457 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2458
2459 emit_label (donelab);
2460 }
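/* A minimal host-language sketch of the branchy sequence emitted above,
   for illustration only (assumes a 64-bit unsigned long long; this is not
   used by the compiler).  */
#if 0
static double
floatuns_sketch (unsigned long long x)
{
  if ((long long) x >= 0)
    return (double) (long long) x;

  /* Halve, folding the discarded low bit back in so the final rounding
     is still correct, then double the converted value.  */
  unsigned long long half = (x >> 1) | (x & 1);
  double d = (double) (long long) half;
  return d + d;
}
#endif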
2461
2462 /* Generate the comparison for a conditional branch. */
2463
2464 rtx
2465 alpha_emit_conditional_branch (enum rtx_code code)
2466 {
2467 enum rtx_code cmp_code, branch_code;
2468 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2469 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2470 rtx tem;
2471
2472 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2473 {
2474 if (! TARGET_HAS_XFLOATING_LIBS)
2475 abort ();
2476
2477 /* X_floating library comparison functions return
2478 -1 unordered
2479 0 false
2480 1 true
2481 Convert the compare against the raw return value. */
2482
2483 switch (code)
2484 {
2485 case UNORDERED:
2486 cmp_code = EQ;
2487 code = LT;
2488 break;
2489 case ORDERED:
2490 cmp_code = EQ;
2491 code = GE;
2492 break;
2493 case NE:
2494 cmp_code = NE;
2495 code = NE;
2496 break;
2497 default:
2498 cmp_code = code;
2499 code = GT;
2500 break;
2501 }
2502
2503 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2504 op1 = const0_rtx;
2505 alpha_compare.fp_p = 0;
2506 }
2507
2508 /* The general case: fold the comparison code to the types of compares
2509 that we have, choosing the branch as necessary. */
2510 switch (code)
2511 {
2512 case EQ: case LE: case LT: case LEU: case LTU:
2513 case UNORDERED:
2514 /* We have these compares: */
2515 cmp_code = code, branch_code = NE;
2516 break;
2517
2518 case NE:
2519 case ORDERED:
2520 /* These must be reversed. */
2521 cmp_code = reverse_condition (code), branch_code = EQ;
2522 break;
2523
2524 case GE: case GT: case GEU: case GTU:
2525 /* For FP, we swap them, for INT, we reverse them. */
2526 if (alpha_compare.fp_p)
2527 {
2528 cmp_code = swap_condition (code);
2529 branch_code = NE;
2530 tem = op0, op0 = op1, op1 = tem;
2531 }
2532 else
2533 {
2534 cmp_code = reverse_condition (code);
2535 branch_code = EQ;
2536 }
2537 break;
2538
2539 default:
2540 abort ();
2541 }
2542
2543 if (alpha_compare.fp_p)
2544 {
2545 cmp_mode = DFmode;
2546 if (flag_unsafe_math_optimizations)
2547 {
2548 /* When we are not as concerned about non-finite values, and we
2549 are comparing against zero, we can branch directly. */
2550 if (op1 == CONST0_RTX (DFmode))
2551 cmp_code = UNKNOWN, branch_code = code;
2552 else if (op0 == CONST0_RTX (DFmode))
2553 {
2554 /* Undo the swap we probably did just above. */
2555 tem = op0, op0 = op1, op1 = tem;
2556 branch_code = swap_condition (cmp_code);
2557 cmp_code = UNKNOWN;
2558 }
2559 }
2560 else
2561 {
2562 /* ??? We mark the branch mode to be CCmode to prevent the
2563 compare and branch from being combined, since the compare
2564 insn follows IEEE rules that the branch does not. */
2565 branch_mode = CCmode;
2566 }
2567 }
2568 else
2569 {
2570 cmp_mode = DImode;
2571
2572 /* The following optimizations are only for signed compares. */
2573 if (code != LEU && code != LTU && code != GEU && code != GTU)
2574 {
2575 /* Whee. Compare and branch against 0 directly. */
2576 if (op1 == const0_rtx)
2577 cmp_code = UNKNOWN, branch_code = code;
2578
2579 /* If the constant doesn't fit into an immediate, but can
2580 be generated by lda/ldah, we adjust the argument and
2581 compare against zero, so we can use beq/bne directly. */
2582 /* ??? Don't do this when comparing against symbols, otherwise
2583 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2584 be declared false out of hand (at least for non-weak). */
2585 else if (GET_CODE (op1) == CONST_INT
2586 && (code == EQ || code == NE)
2587 && !(symbolic_operand (op0, VOIDmode)
2588 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2589 {
2590 HOST_WIDE_INT v = INTVAL (op1), n = -v;
2591
2592 if (! CONST_OK_FOR_LETTER_P (v, 'I')
2593 && (CONST_OK_FOR_LETTER_P (n, 'K')
2594 || CONST_OK_FOR_LETTER_P (n, 'L')))
2595 {
2596 cmp_code = PLUS, branch_code = code;
2597 op1 = GEN_INT (n);
2598 }
2599 }
2600 }
2601
2602 if (!reg_or_0_operand (op0, DImode))
2603 op0 = force_reg (DImode, op0);
2604 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2605 op1 = force_reg (DImode, op1);
2606 }
2607
2608 /* Emit an initial compare instruction, if necessary. */
2609 tem = op0;
2610 if (cmp_code != UNKNOWN)
2611 {
2612 tem = gen_reg_rtx (cmp_mode);
2613 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2614 }
2615
2616 /* Zero the operands. */
2617 memset (&alpha_compare, 0, sizeof (alpha_compare));
2618
2619 /* Return the branch comparison. */
2620 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2621 }
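/* For instance (purely illustrative): an integer a > b is folded above
   into "cmple a,b,t ; beq t,...", since GT is reversed to LE with an EQ
   branch, while a DFmode a > b becomes "cmptlt b,a,t ; fbne t,...", since
   for FP the operands are swapped rather than the condition reversed.  */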
2622
2623 /* Certain simplifications can be done to make invalid setcc operations
2624 valid. Return the final comparison, or NULL if we can't work. */
2625
2626 rtx
2627 alpha_emit_setcc (enum rtx_code code)
2628 {
2629 enum rtx_code cmp_code;
2630 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2631 int fp_p = alpha_compare.fp_p;
2632 rtx tmp;
2633
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2636
2637 if (fp_p && GET_MODE (op0) == TFmode)
2638 {
2639 if (! TARGET_HAS_XFLOATING_LIBS)
2640 abort ();
2641
2642 /* X_floating library comparison functions return
2643 -1 unordered
2644 0 false
2645 1 true
2646 Convert the compare against the raw return value. */
2647
2648 if (code == UNORDERED || code == ORDERED)
2649 cmp_code = EQ;
2650 else
2651 cmp_code = code;
2652
2653 op0 = alpha_emit_xfloating_compare (cmp_code, op0, op1);
2654 op1 = const0_rtx;
2655 fp_p = 0;
2656
2657 if (code == UNORDERED)
2658 code = LT;
2659 else if (code == ORDERED)
2660 code = GE;
2661 else
2662 code = GT;
2663 }
2664
2665 if (fp_p && !TARGET_FIX)
2666 return NULL_RTX;
2667
2668 /* The general case: fold the comparison code to the types of compares
2669 that we have, choosing the branch as necessary. */
2670
2671 cmp_code = UNKNOWN;
2672 switch (code)
2673 {
2674 case EQ: case LE: case LT: case LEU: case LTU:
2675 case UNORDERED:
2676 /* We have these compares. */
2677 if (fp_p)
2678 cmp_code = code, code = NE;
2679 break;
2680
2681 case NE:
2682 if (!fp_p && op1 == const0_rtx)
2683 break;
2684 /* FALLTHRU */
2685
2686 case ORDERED:
2687 cmp_code = reverse_condition (code);
2688 code = EQ;
2689 break;
2690
2691 case GE: case GT: case GEU: case GTU:
2692 /* These normally need swapping, but for integer zero we have
2693 special patterns that recognize swapped operands. */
2694 if (!fp_p && op1 == const0_rtx)
2695 break;
2696 code = swap_condition (code);
2697 if (fp_p)
2698 cmp_code = code, code = NE;
2699 tmp = op0, op0 = op1, op1 = tmp;
2700 break;
2701
2702 default:
2703 abort ();
2704 }
2705
2706 if (!fp_p)
2707 {
2708 if (!register_operand (op0, DImode))
2709 op0 = force_reg (DImode, op0);
2710 if (!reg_or_8bit_operand (op1, DImode))
2711 op1 = force_reg (DImode, op1);
2712 }
2713
2714 /* Emit an initial compare instruction, if necessary. */
2715 if (cmp_code != UNKNOWN)
2716 {
2717 enum machine_mode mode = fp_p ? DFmode : DImode;
2718
2719 tmp = gen_reg_rtx (mode);
2720 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2721 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2722
2723 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2724 op1 = const0_rtx;
2725 }
2726
2727 /* Return the setcc comparison. */
2728 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2729 }
2730
2731
2732 /* Rewrite a comparison against zero CMP of the form
2733 (CODE (cc0) (const_int 0)) so it can be written validly in
2734 a conditional move (if_then_else CMP ...).
2735 If both of the operands that set cc0 are nonzero we must emit
2736 an insn to perform the compare (it can't be done within
2737 the conditional move). */
2738
2739 rtx
2740 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2741 {
2742 enum rtx_code code = GET_CODE (cmp);
2743 enum rtx_code cmov_code = NE;
2744 rtx op0 = alpha_compare.op0;
2745 rtx op1 = alpha_compare.op1;
2746 int fp_p = alpha_compare.fp_p;
2747 enum machine_mode cmp_mode
2748 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2749 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2750 enum machine_mode cmov_mode = VOIDmode;
2751 int local_fast_math = flag_unsafe_math_optimizations;
2752 rtx tem;
2753
2754 /* Zero the operands. */
2755 memset (&alpha_compare, 0, sizeof (alpha_compare));
2756
2757 if (fp_p != FLOAT_MODE_P (mode))
2758 {
2759 enum rtx_code cmp_code;
2760
2761 if (! TARGET_FIX)
2762 return 0;
2763
2764 /* If we have fp<->int register move instructions, do a cmov by
2765 performing the comparison in fp registers, and move the
2766 zero/nonzero value to integer registers, where we can then
2767 use a normal cmov, or vice-versa. */
2768
2769 switch (code)
2770 {
2771 case EQ: case LE: case LT: case LEU: case LTU:
2772 /* We have these compares. */
2773 cmp_code = code, code = NE;
2774 break;
2775
2776 case NE:
2777 /* This must be reversed. */
2778 cmp_code = EQ, code = EQ;
2779 break;
2780
2781 case GE: case GT: case GEU: case GTU:
2782 /* These normally need swapping, but for integer zero we have
2783 special patterns that recognize swapped operands. */
2784 if (!fp_p && op1 == const0_rtx)
2785 cmp_code = code, code = NE;
2786 else
2787 {
2788 cmp_code = swap_condition (code);
2789 code = NE;
2790 tem = op0, op0 = op1, op1 = tem;
2791 }
2792 break;
2793
2794 default:
2795 abort ();
2796 }
2797
2798 tem = gen_reg_rtx (cmp_op_mode);
2799 emit_insn (gen_rtx_SET (VOIDmode, tem,
2800 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2801 op0, op1)));
2802
2803 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2804 op0 = gen_lowpart (cmp_op_mode, tem);
2805 op1 = CONST0_RTX (cmp_op_mode);
2806 fp_p = !fp_p;
2807 local_fast_math = 1;
2808 }
2809
2810 /* We may be able to use a conditional move directly.
2811 This avoids emitting spurious compares. */
2812 if (signed_comparison_operator (cmp, VOIDmode)
2813 && (!fp_p || local_fast_math)
2814 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2815 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2816
2817 /* We can't put the comparison inside the conditional move;
2818 emit a compare instruction and put that inside the
2819 conditional move. Make sure we emit only comparisons we have;
2820 swap or reverse as necessary. */
2821
2822 if (no_new_pseudos)
2823 return NULL_RTX;
2824
2825 switch (code)
2826 {
2827 case EQ: case LE: case LT: case LEU: case LTU:
2828 /* We have these compares: */
2829 break;
2830
2831 case NE:
2832 /* This must be reversed. */
2833 code = reverse_condition (code);
2834 cmov_code = EQ;
2835 break;
2836
2837 case GE: case GT: case GEU: case GTU:
2838 /* These must be swapped. */
2839 if (op1 != CONST0_RTX (cmp_mode))
2840 {
2841 code = swap_condition (code);
2842 tem = op0, op0 = op1, op1 = tem;
2843 }
2844 break;
2845
2846 default:
2847 abort ();
2848 }
2849
2850 if (!fp_p)
2851 {
2852 if (!reg_or_0_operand (op0, DImode))
2853 op0 = force_reg (DImode, op0);
2854 if (!reg_or_8bit_operand (op1, DImode))
2855 op1 = force_reg (DImode, op1);
2856 }
2857
2858 /* ??? We mark the branch mode to be CCmode to prevent the compare
2859 and cmov from being combined, since the compare insn follows IEEE
2860 rules that the cmov does not. */
2861 if (fp_p && !local_fast_math)
2862 cmov_mode = CCmode;
2863
2864 tem = gen_reg_rtx (cmp_op_mode);
2865 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2866 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2867 }
2868
2869 /* Simplify a conditional move of two constants into a setcc with
2870 arithmetic. This is done with a splitter since combine would
2871 just undo the work if done during code generation. It also catches
2872 cases we wouldn't have before cse. */
2873
2874 int
2875 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2876 rtx t_rtx, rtx f_rtx)
2877 {
2878 HOST_WIDE_INT t, f, diff;
2879 enum machine_mode mode;
2880 rtx target, subtarget, tmp;
2881
2882 mode = GET_MODE (dest);
2883 t = INTVAL (t_rtx);
2884 f = INTVAL (f_rtx);
2885 diff = t - f;
2886
2887 if (((code == NE || code == EQ) && diff < 0)
2888 || (code == GE || code == GT))
2889 {
2890 code = reverse_condition (code);
2891 diff = t, t = f, f = diff;
2892 diff = t - f;
2893 }
2894
2895 subtarget = target = dest;
2896 if (mode != DImode)
2897 {
2898 target = gen_lowpart (DImode, dest);
2899 if (! no_new_pseudos)
2900 subtarget = gen_reg_rtx (DImode);
2901 else
2902 subtarget = target;
2903 }
2904 /* Below, we must be careful to use copy_rtx on target and subtarget
2905 in intermediate insns, as they may be a subreg rtx, which may not
2906 be shared. */
2907
2908 if (f == 0 && exact_log2 (diff) > 0
2909 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2910 viable over a longer latency cmove. On EV5, the E0 slot is a
2911 scarce resource, and on EV4 shift has the same latency as a cmove. */
2912 && (diff <= 8 || alpha_cpu == PROCESSOR_EV6))
2913 {
2914 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2915 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2916
2917 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2918 GEN_INT (exact_log2 (t)));
2919 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2920 }
2921 else if (f == 0 && t == -1)
2922 {
2923 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2924 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2925
2926 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2927 }
2928 else if (diff == 1 || diff == 4 || diff == 8)
2929 {
2930 rtx add_op;
2931
2932 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2933 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2934
2935 if (diff == 1)
2936 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2937 else
2938 {
2939 add_op = GEN_INT (f);
2940 if (sext_add_operand (add_op, mode))
2941 {
2942 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2943 GEN_INT (diff));
2944 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2945 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2946 }
2947 else
2948 return 0;
2949 }
2950 }
2951 else
2952 return 0;
2953
2954 return 1;
2955 }
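/* Hand-worked instances of the split above (operand values are
   hypothetical):

     cond != 0 ? 8 : 0    becomes   t = (cond != 0);  dest = t << 3
     cond <  0 ? -1 : 0   becomes   t = (cond < 0);   dest = -t
     cond != 0 ? 5 : 1    becomes   t = (cond != 0);  dest = t * 4 + 1  (s4addq)  */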
2956 \f
2957 /* Look up the X_floating library function name for the
2958 given operation. */
2959
2960 struct xfloating_op GTY(())
2961 {
2962 const enum rtx_code code;
2963 const char *const GTY((skip)) osf_func;
2964 const char *const GTY((skip)) vms_func;
2965 rtx libcall;
2966 };
2967
2968 static GTY(()) struct xfloating_op xfloating_ops[] =
2969 {
2970 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2971 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2972 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2973 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2974 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2975 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2976 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2977 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2978 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2979 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2980 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2981 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2982 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2983 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2984 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2985 };
2986
2987 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2988 {
2989 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2990 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2991 };
2992
2993 static rtx
2994 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2995 {
2996 struct xfloating_op *ops = xfloating_ops;
2997 long n = ARRAY_SIZE (xfloating_ops);
2998 long i;
2999
3000 /* How irritating. Nothing to key off for the main table. */
3001 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
3002 {
3003 ops = vax_cvt_ops;
3004 n = ARRAY_SIZE (vax_cvt_ops);
3005 }
3006
3007 for (i = 0; i < n; ++i, ++ops)
3008 if (ops->code == code)
3009 {
3010 rtx func = ops->libcall;
3011 if (!func)
3012 {
3013 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
3014 ? ops->vms_func : ops->osf_func);
3015 ops->libcall = func;
3016 }
3017 return func;
3018 }
3019
3020 abort ();
3021 }
3022
3023 /* Most X_floating operations take the rounding mode as an argument.
3024 Compute that here. */
3025
3026 static int
3027 alpha_compute_xfloating_mode_arg (enum rtx_code code,
3028 enum alpha_fp_rounding_mode round)
3029 {
3030 int mode;
3031
3032 switch (round)
3033 {
3034 case ALPHA_FPRM_NORM:
3035 mode = 2;
3036 break;
3037 case ALPHA_FPRM_MINF:
3038 mode = 1;
3039 break;
3040 case ALPHA_FPRM_CHOP:
3041 mode = 0;
3042 break;
3043 case ALPHA_FPRM_DYN:
3044 mode = 4;
3045 break;
3046 default:
3047 abort ();
3048
3049 /* XXX For reference, round to +inf is mode = 3. */
3050 }
3051
3052 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
3053 mode |= 0x10000;
3054
3055 return mode;
3056 }
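/* For example, FLOAT_TRUNCATE under the default round-to-nearest
   (ALPHA_FPRM_NORM) with -mfp-trap-mode=n comes out as 2 | 0x10000.  */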
3057
3058 /* Emit an X_floating library function call.
3059
3060 Note that these functions do not follow normal calling conventions:
3061 TFmode arguments are passed in two integer registers (as opposed to
3062 indirect); TFmode return values appear in R16+R17.
3063
3064 FUNC is the function to call.
3065 TARGET is where the output belongs.
3066 OPERANDS are the inputs.
3067 NOPERANDS is the count of inputs.
3068 EQUIV is the expression equivalent for the function.
3069 */
3070
3071 static void
3072 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3073 int noperands, rtx equiv)
3074 {
3075 rtx usage = NULL_RTX, tmp, reg;
3076 int regno = 16, i;
3077
3078 start_sequence ();
3079
3080 for (i = 0; i < noperands; ++i)
3081 {
3082 switch (GET_MODE (operands[i]))
3083 {
3084 case TFmode:
3085 reg = gen_rtx_REG (TFmode, regno);
3086 regno += 2;
3087 break;
3088
3089 case DFmode:
3090 reg = gen_rtx_REG (DFmode, regno + 32);
3091 regno += 1;
3092 break;
3093
3094 case VOIDmode:
3095 if (GET_CODE (operands[i]) != CONST_INT)
3096 abort ();
3097 /* FALLTHRU */
3098 case DImode:
3099 reg = gen_rtx_REG (DImode, regno);
3100 regno += 1;
3101 break;
3102
3103 default:
3104 abort ();
3105 }
3106
3107 emit_move_insn (reg, operands[i]);
3108 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3109 }
3110
3111 switch (GET_MODE (target))
3112 {
3113 case TFmode:
3114 reg = gen_rtx_REG (TFmode, 16);
3115 break;
3116 case DFmode:
3117 reg = gen_rtx_REG (DFmode, 32);
3118 break;
3119 case DImode:
3120 reg = gen_rtx_REG (DImode, 0);
3121 break;
3122 default:
3123 abort ();
3124 }
3125
3126 tmp = gen_rtx_MEM (QImode, func);
3127 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3128 const0_rtx, const0_rtx));
3129 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3130 CONST_OR_PURE_CALL_P (tmp) = 1;
3131
3132 tmp = get_insns ();
3133 end_sequence ();
3134
3135 emit_libcall_block (tmp, target, reg, equiv);
3136 }
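/* For example, for the three-operand arithmetic calls made by
   alpha_emit_xfloating_arith below (two TFmode inputs plus the
   rounding-mode constant), the loop above assigns $16/$17 and $18/$19 to
   the TFmode arguments and $20 to the DImode mode argument, and the
   TFmode result comes back in $16/$17.  */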
3137
3138 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3139
3140 void
3141 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3142 {
3143 rtx func;
3144 int mode;
3145 rtx out_operands[3];
3146
3147 func = alpha_lookup_xfloating_lib_func (code);
3148 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3149
3150 out_operands[0] = operands[1];
3151 out_operands[1] = operands[2];
3152 out_operands[2] = GEN_INT (mode);
3153 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3154 gen_rtx_fmt_ee (code, TFmode, operands[1],
3155 operands[2]));
3156 }
3157
3158 /* Emit an X_floating library function call for a comparison. */
3159
3160 static rtx
3161 alpha_emit_xfloating_compare (enum rtx_code code, rtx op0, rtx op1)
3162 {
3163 rtx func;
3164 rtx out, operands[2];
3165
3166 func = alpha_lookup_xfloating_lib_func (code);
3167
3168 operands[0] = op0;
3169 operands[1] = op1;
3170 out = gen_reg_rtx (DImode);
3171
3172 /* ??? Strange mode for equiv because what's actually returned
3173 is -1,0,1, not a proper boolean value. */
3174 alpha_emit_xfloating_libcall (func, out, operands, 2,
3175 gen_rtx_fmt_ee (code, CCmode, op0, op1));
3176
3177 return out;
3178 }
3179
3180 /* Emit an X_floating library function call for a conversion. */
3181
3182 void
3183 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3184 {
3185 int noperands = 1, mode;
3186 rtx out_operands[2];
3187 rtx func;
3188 enum rtx_code code = orig_code;
3189
3190 if (code == UNSIGNED_FIX)
3191 code = FIX;
3192
3193 func = alpha_lookup_xfloating_lib_func (code);
3194
3195 out_operands[0] = operands[1];
3196
3197 switch (code)
3198 {
3199 case FIX:
3200 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3201 out_operands[1] = GEN_INT (mode);
3202 noperands = 2;
3203 break;
3204 case FLOAT_TRUNCATE:
3205 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3206 out_operands[1] = GEN_INT (mode);
3207 noperands = 2;
3208 break;
3209 default:
3210 break;
3211 }
3212
3213 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3214 gen_rtx_fmt_e (orig_code,
3215 GET_MODE (operands[0]),
3216 operands[1]));
3217 }
3218
3219 /* Split a TFmode OP[1] into DImode OP[2,3] and likewise for
3220 OP[0] into OP[0,1]. Naturally, output operand ordering is
3221 little-endian. */
3222
3223 void
3224 alpha_split_tfmode_pair (rtx operands[4])
3225 {
3226 if (GET_CODE (operands[1]) == REG)
3227 {
3228 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3229 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3230 }
3231 else if (GET_CODE (operands[1]) == MEM)
3232 {
3233 operands[3] = adjust_address (operands[1], DImode, 8);
3234 operands[2] = adjust_address (operands[1], DImode, 0);
3235 }
3236 else if (operands[1] == CONST0_RTX (TFmode))
3237 operands[2] = operands[3] = const0_rtx;
3238 else
3239 abort ();
3240
3241 if (GET_CODE (operands[0]) == REG)
3242 {
3243 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3244 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3245 }
3246 else if (GET_CODE (operands[0]) == MEM)
3247 {
3248 operands[1] = adjust_address (operands[0], DImode, 8);
3249 operands[0] = adjust_address (operands[0], DImode, 0);
3250 }
3251 else
3252 abort ();
3253 }
3254
3255 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3256 op2 is a register containing the sign bit, operation is the
3257 logical operation to be performed. */
3258
3259 void
3260 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3261 {
3262 rtx high_bit = operands[2];
3263 rtx scratch;
3264 int move;
3265
3266 alpha_split_tfmode_pair (operands);
3267
3268 /* Detect three flavors of operand overlap. */
3269 move = 1;
3270 if (rtx_equal_p (operands[0], operands[2]))
3271 move = 0;
3272 else if (rtx_equal_p (operands[1], operands[2]))
3273 {
3274 if (rtx_equal_p (operands[0], high_bit))
3275 move = 2;
3276 else
3277 move = -1;
3278 }
3279
3280 if (move < 0)
3281 emit_move_insn (operands[0], operands[2]);
3282
3283 /* ??? If the destination overlaps both source tf and high_bit, then
3284 assume source tf is dead in its entirety and use the other half
3285 for a scratch register. Otherwise "scratch" is just the proper
3286 destination register. */
3287 scratch = operands[move < 2 ? 1 : 3];
3288
3289 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3290
3291 if (move > 0)
3292 {
3293 emit_move_insn (operands[0], operands[2]);
3294 if (move > 1)
3295 emit_move_insn (operands[1], scratch);
3296 }
3297 }
3298 \f
3299 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3300 unaligned data:
3301
3302 unsigned: signed:
3303 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3304 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3305 lda r3,X(r11) lda r3,X+2(r11)
3306 extwl r1,r3,r1 extql r1,r3,r1
3307 extwh r2,r3,r2 extqh r2,r3,r2
3308 or r1,r2,r1 or r1,r2,r1
3309 sra r1,48,r1
3310
3311 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3312 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3313 lda r3,X(r11) lda r3,X(r11)
3314 extll r1,r3,r1 extll r1,r3,r1
3315 extlh r2,r3,r2 extlh r2,r3,r2
3316 or r1,r2,r1 addl r1,r2,r1
3317
3318 quad: ldq_u r1,X(r11)
3319 ldq_u r2,X+7(r11)
3320 lda r3,X(r11)
3321 extql r1,r3,r1
3322 extqh r2,r3,r2
3323 or r1,r2,r1
3324 */
3325
3326 void
3327 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3328 HOST_WIDE_INT ofs, int sign)
3329 {
3330 rtx meml, memh, addr, extl, exth, tmp, mema;
3331 enum machine_mode mode;
3332
3333 if (TARGET_BWX && size == 2)
3334 {
3335 meml = adjust_address (mem, QImode, ofs);
3336 memh = adjust_address (mem, QImode, ofs+1);
3337 if (BYTES_BIG_ENDIAN)
3338 tmp = meml, meml = memh, memh = tmp;
3339 extl = gen_reg_rtx (DImode);
3340 exth = gen_reg_rtx (DImode);
3341 emit_insn (gen_zero_extendqidi2 (extl, meml));
3342 emit_insn (gen_zero_extendqidi2 (exth, memh));
3343 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3344 NULL, 1, OPTAB_LIB_WIDEN);
3345 addr = expand_simple_binop (DImode, IOR, extl, exth,
3346 NULL, 1, OPTAB_LIB_WIDEN);
3347
3348 if (sign && GET_MODE (tgt) != HImode)
3349 {
3350 addr = gen_lowpart (HImode, addr);
3351 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3352 }
3353 else
3354 {
3355 if (GET_MODE (tgt) != DImode)
3356 addr = gen_lowpart (GET_MODE (tgt), addr);
3357 emit_move_insn (tgt, addr);
3358 }
3359 return;
3360 }
3361
3362 meml = gen_reg_rtx (DImode);
3363 memh = gen_reg_rtx (DImode);
3364 addr = gen_reg_rtx (DImode);
3365 extl = gen_reg_rtx (DImode);
3366 exth = gen_reg_rtx (DImode);
3367
3368 mema = XEXP (mem, 0);
3369 if (GET_CODE (mema) == LO_SUM)
3370 mema = force_reg (Pmode, mema);
3371
3372 /* AND addresses cannot be in any alias set, since they may implicitly
3373 alias surrounding code. Ideally we'd have some alias set that
3374 covered all types except those with alignment 8 or higher. */
3375
3376 tmp = change_address (mem, DImode,
3377 gen_rtx_AND (DImode,
3378 plus_constant (mema, ofs),
3379 GEN_INT (-8)));
3380 set_mem_alias_set (tmp, 0);
3381 emit_move_insn (meml, tmp);
3382
3383 tmp = change_address (mem, DImode,
3384 gen_rtx_AND (DImode,
3385 plus_constant (mema, ofs + size - 1),
3386 GEN_INT (-8)));
3387 set_mem_alias_set (tmp, 0);
3388 emit_move_insn (memh, tmp);
3389
3390 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3391 {
3392 emit_move_insn (addr, plus_constant (mema, -1));
3393
3394 emit_insn (gen_extqh_be (extl, meml, addr));
3395 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3396
3397 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3398 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3399 addr, 1, OPTAB_WIDEN);
3400 }
3401 else if (sign && size == 2)
3402 {
3403 emit_move_insn (addr, plus_constant (mema, ofs+2));
3404
3405 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3406 emit_insn (gen_extqh_le (exth, memh, addr));
3407
3408 /* We must use tgt here for the target. Alpha-vms port fails if we use
3409 addr for the target, because addr is marked as a pointer and combine
3410 knows that pointers are always sign-extended 32 bit values. */
3411 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3412 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3413 addr, 1, OPTAB_WIDEN);
3414 }
3415 else
3416 {
3417 if (WORDS_BIG_ENDIAN)
3418 {
3419 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3420 switch ((int) size)
3421 {
3422 case 2:
3423 emit_insn (gen_extwh_be (extl, meml, addr));
3424 mode = HImode;
3425 break;
3426
3427 case 4:
3428 emit_insn (gen_extlh_be (extl, meml, addr));
3429 mode = SImode;
3430 break;
3431
3432 case 8:
3433 emit_insn (gen_extqh_be (extl, meml, addr));
3434 mode = DImode;
3435 break;
3436
3437 default:
3438 abort ();
3439 }
3440 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3441 }
3442 else
3443 {
3444 emit_move_insn (addr, plus_constant (mema, ofs));
3445 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3446 switch ((int) size)
3447 {
3448 case 2:
3449 emit_insn (gen_extwh_le (exth, memh, addr));
3450 mode = HImode;
3451 break;
3452
3453 case 4:
3454 emit_insn (gen_extlh_le (exth, memh, addr));
3455 mode = SImode;
3456 break;
3457
3458 case 8:
3459 emit_insn (gen_extqh_le (exth, memh, addr));
3460 mode = DImode;
3461 break;
3462
3463 default:
3464 abort ();
3465 }
3466 }
3467
3468 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3469 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3470 sign, OPTAB_WIDEN);
3471 }
3472
3473 if (addr != tgt)
3474 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3475 }
3476
3477 /* Similarly, use ins and msk instructions to perform unaligned stores. */
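/* Schematically, an unaligned longword store of r5 at X(r11) comes out
   much like this (little-endian; register numbers are illustrative and
   the patterns used below differ in detail):

	ldq_u r1,X(r11)
	ldq_u r2,X+3(r11)
	lda r3,X(r11)
	insll r5,r3,r4
	inslh r5,r3,r6
	mskll r1,r3,r1
	msklh r2,r3,r2
	or r1,r4,r1
	or r2,r6,r2
	stq_u r2,X+3(r11)
	stq_u r1,X(r11)
   */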
3478
3479 void
3480 alpha_expand_unaligned_store (rtx dst, rtx src,
3481 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3482 {
3483 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3484
3485 if (TARGET_BWX && size == 2)
3486 {
3487 if (src != const0_rtx)
3488 {
3489 dstl = gen_lowpart (QImode, src);
3490 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3491 NULL, 1, OPTAB_LIB_WIDEN);
3492 dsth = gen_lowpart (QImode, dsth);
3493 }
3494 else
3495 dstl = dsth = const0_rtx;
3496
3497 meml = adjust_address (dst, QImode, ofs);
3498 memh = adjust_address (dst, QImode, ofs+1);
3499 if (BYTES_BIG_ENDIAN)
3500 addr = meml, meml = memh, memh = addr;
3501
3502 emit_move_insn (meml, dstl);
3503 emit_move_insn (memh, dsth);
3504 return;
3505 }
3506
3507 dstl = gen_reg_rtx (DImode);
3508 dsth = gen_reg_rtx (DImode);
3509 insl = gen_reg_rtx (DImode);
3510 insh = gen_reg_rtx (DImode);
3511
3512 dsta = XEXP (dst, 0);
3513 if (GET_CODE (dsta) == LO_SUM)
3514 dsta = force_reg (Pmode, dsta);
3515
3516 /* AND addresses cannot be in any alias set, since they may implicitly
3517 alias surrounding code. Ideally we'd have some alias set that
3518 covered all types except those with alignment 8 or higher. */
3519
3520 meml = change_address (dst, DImode,
3521 gen_rtx_AND (DImode,
3522 plus_constant (dsta, ofs),
3523 GEN_INT (-8)));
3524 set_mem_alias_set (meml, 0);
3525
3526 memh = change_address (dst, DImode,
3527 gen_rtx_AND (DImode,
3528 plus_constant (dsta, ofs + size - 1),
3529 GEN_INT (-8)));
3530 set_mem_alias_set (memh, 0);
3531
3532 emit_move_insn (dsth, memh);
3533 emit_move_insn (dstl, meml);
3534 if (WORDS_BIG_ENDIAN)
3535 {
3536 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3537
3538 if (src != const0_rtx)
3539 {
3540 switch ((int) size)
3541 {
3542 case 2:
3543 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3544 break;
3545 case 4:
3546 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3547 break;
3548 case 8:
3549 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3550 break;
3551 }
3552 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3553 GEN_INT (size*8), addr));
3554 }
3555
3556 switch ((int) size)
3557 {
3558 case 2:
3559 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3560 break;
3561 case 4:
3562 {
3563 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3564 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3565 break;
3566 }
3567 case 8:
3568 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3569 break;
3570 }
3571
3572 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3573 }
3574 else
3575 {
3576 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3577
3578 if (src != CONST0_RTX (GET_MODE (src)))
3579 {
3580 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3581 GEN_INT (size*8), addr));
3582
3583 switch ((int) size)
3584 {
3585 case 2:
3586 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3587 break;
3588 case 4:
3589 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3590 break;
3591 case 8:
3592 emit_insn (gen_insql_le (insl, src, addr));
3593 break;
3594 }
3595 }
3596
3597 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3598
3599 switch ((int) size)
3600 {
3601 case 2:
3602 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3603 break;
3604 case 4:
3605 {
3606 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3607 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3608 break;
3609 }
3610 case 8:
3611 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3612 break;
3613 }
3614 }
3615
3616 if (src != CONST0_RTX (GET_MODE (src)))
3617 {
3618 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3619 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3620 }
3621
3622 if (WORDS_BIG_ENDIAN)
3623 {
3624 emit_move_insn (meml, dstl);
3625 emit_move_insn (memh, dsth);
3626 }
3627 else
3628 {
3629 /* Must store high before low for degenerate case of aligned. */
3630 emit_move_insn (memh, dsth);
3631 emit_move_insn (meml, dstl);
3632 }
3633 }
3634
3635 /* The block move code tries to maximize speed by separating loads and
3636 stores at the expense of register pressure: we load all of the data
3637 before we store it back out. There are two secondary effects worth
3638 mentioning: this speeds copying to/from aligned and unaligned
3639 buffers, and it makes the code significantly easier to write. */
3640
3641 #define MAX_MOVE_WORDS 8
3642
3643 /* Load an integral number of consecutive unaligned quadwords. */
3644
3645 static void
3646 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3647 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3648 {
3649 rtx const im8 = GEN_INT (-8);
3650 rtx const i64 = GEN_INT (64);
3651 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3652 rtx sreg, areg, tmp, smema;
3653 HOST_WIDE_INT i;
3654
3655 smema = XEXP (smem, 0);
3656 if (GET_CODE (smema) == LO_SUM)
3657 smema = force_reg (Pmode, smema);
3658
3659 /* Generate all the tmp registers we need. */
3660 for (i = 0; i < words; ++i)
3661 {
3662 data_regs[i] = out_regs[i];
3663 ext_tmps[i] = gen_reg_rtx (DImode);
3664 }
3665 data_regs[words] = gen_reg_rtx (DImode);
3666
3667 if (ofs != 0)
3668 smem = adjust_address (smem, GET_MODE (smem), ofs);
3669
3670 /* Load up all of the source data. */
3671 for (i = 0; i < words; ++i)
3672 {
3673 tmp = change_address (smem, DImode,
3674 gen_rtx_AND (DImode,
3675 plus_constant (smema, 8*i),
3676 im8));
3677 set_mem_alias_set (tmp, 0);
3678 emit_move_insn (data_regs[i], tmp);
3679 }
3680
3681 tmp = change_address (smem, DImode,
3682 gen_rtx_AND (DImode,
3683 plus_constant (smema, 8*words - 1),
3684 im8));
3685 set_mem_alias_set (tmp, 0);
3686 emit_move_insn (data_regs[words], tmp);
3687
3688 /* Extract the half-word fragments. Unfortunately DEC decided to make
3689 extxh with offset zero a noop instead of zeroing the register, so
3690 we must take care of that edge condition ourselves with cmov. */
3691
3692 sreg = copy_addr_to_reg (smema);
3693 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3694 1, OPTAB_WIDEN);
3695 if (WORDS_BIG_ENDIAN)
3696 emit_move_insn (sreg, plus_constant (sreg, 7));
3697 for (i = 0; i < words; ++i)
3698 {
3699 if (WORDS_BIG_ENDIAN)
3700 {
3701 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3702 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3703 }
3704 else
3705 {
3706 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3707 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3708 }
3709 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3710 gen_rtx_IF_THEN_ELSE (DImode,
3711 gen_rtx_EQ (DImode, areg,
3712 const0_rtx),
3713 const0_rtx, ext_tmps[i])));
3714 }
3715
3716 /* Merge the half-words into whole words. */
3717 for (i = 0; i < words; ++i)
3718 {
3719 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3720 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3721 }
3722 }
3723
3724 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3725 may be NULL to store zeros. */
3726
3727 static void
3728 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3729 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3730 {
3731 rtx const im8 = GEN_INT (-8);
3732 rtx const i64 = GEN_INT (64);
3733 rtx ins_tmps[MAX_MOVE_WORDS];
3734 rtx st_tmp_1, st_tmp_2, dreg;
3735 rtx st_addr_1, st_addr_2, dmema;
3736 HOST_WIDE_INT i;
3737
3738 dmema = XEXP (dmem, 0);
3739 if (GET_CODE (dmema) == LO_SUM)
3740 dmema = force_reg (Pmode, dmema);
3741
3742 /* Generate all the tmp registers we need. */
3743 if (data_regs != NULL)
3744 for (i = 0; i < words; ++i)
3745 ins_tmps[i] = gen_reg_rtx(DImode);
3746 st_tmp_1 = gen_reg_rtx(DImode);
3747 st_tmp_2 = gen_reg_rtx(DImode);
3748
3749 if (ofs != 0)
3750 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3751
3752 st_addr_2 = change_address (dmem, DImode,
3753 gen_rtx_AND (DImode,
3754 plus_constant (dmema, words*8 - 1),
3755 im8));
3756 set_mem_alias_set (st_addr_2, 0);
3757
3758 st_addr_1 = change_address (dmem, DImode,
3759 gen_rtx_AND (DImode, dmema, im8));
3760 set_mem_alias_set (st_addr_1, 0);
3761
3762 /* Load up the destination end bits. */
3763 emit_move_insn (st_tmp_2, st_addr_2);
3764 emit_move_insn (st_tmp_1, st_addr_1);
3765
3766 /* Shift the input data into place. */
3767 dreg = copy_addr_to_reg (dmema);
3768 if (WORDS_BIG_ENDIAN)
3769 emit_move_insn (dreg, plus_constant (dreg, 7));
3770 if (data_regs != NULL)
3771 {
3772 for (i = words-1; i >= 0; --i)
3773 {
3774 if (WORDS_BIG_ENDIAN)
3775 {
3776 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3777 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3778 }
3779 else
3780 {
3781 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3782 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3783 }
3784 }
3785 for (i = words-1; i > 0; --i)
3786 {
3787 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3788 ins_tmps[i-1], ins_tmps[i-1], 1,
3789 OPTAB_WIDEN);
3790 }
3791 }
3792
3793 /* Split and merge the ends with the destination data. */
3794 if (WORDS_BIG_ENDIAN)
3795 {
3796 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3797 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3798 }
3799 else
3800 {
3801 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3802 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3803 }
3804
3805 if (data_regs != NULL)
3806 {
3807 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3808 st_tmp_2, 1, OPTAB_WIDEN);
3809 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3810 st_tmp_1, 1, OPTAB_WIDEN);
3811 }
3812
3813 /* Store it all. */
3814 if (WORDS_BIG_ENDIAN)
3815 emit_move_insn (st_addr_1, st_tmp_1);
3816 else
3817 emit_move_insn (st_addr_2, st_tmp_2);
3818 for (i = words-1; i > 0; --i)
3819 {
3820 rtx tmp = change_address (dmem, DImode,
3821 gen_rtx_AND (DImode,
3822 plus_constant(dmema,
3823 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3824 im8));
3825 set_mem_alias_set (tmp, 0);
3826 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3827 }
3828 if (WORDS_BIG_ENDIAN)
3829 emit_move_insn (st_addr_2, st_tmp_2);
3830 else
3831 emit_move_insn (st_addr_1, st_tmp_1);
3832 }
3833
3834
3835 /* Expand string/block move operations.
3836
3837 operands[0] is the pointer to the destination.
3838 operands[1] is the pointer to the source.
3839 operands[2] is the number of bytes to move.
3840 operands[3] is the alignment. */
3841
3842 int
3843 alpha_expand_block_move (rtx operands[])
3844 {
3845 rtx bytes_rtx = operands[2];
3846 rtx align_rtx = operands[3];
3847 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3848 HOST_WIDE_INT bytes = orig_bytes;
3849 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3850 HOST_WIDE_INT dst_align = src_align;
3851 rtx orig_src = operands[1];
3852 rtx orig_dst = operands[0];
3853 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3854 rtx tmp;
3855 unsigned int i, words, ofs, nregs = 0;
3856
3857 if (orig_bytes <= 0)
3858 return 1;
3859 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3860 return 0;
3861
3862 /* Look for additional alignment information from recorded register info. */
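/* For instance, in the code below a source pointer register known to be
   64-bit aligned plus a constant offset of 12 can refine src_align to 32,
   since a >= 32 and 12 % 4 == 0 (the offset ruins 64-bit alignment but
   preserves 32-bit alignment).  */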
3863
3864 tmp = XEXP (orig_src, 0);
3865 if (GET_CODE (tmp) == REG)
3866 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3867 else if (GET_CODE (tmp) == PLUS
3868 && GET_CODE (XEXP (tmp, 0)) == REG
3869 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3870 {
3871 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3872 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3873
3874 if (a > src_align)
3875 {
3876 if (a >= 64 && c % 8 == 0)
3877 src_align = 64;
3878 else if (a >= 32 && c % 4 == 0)
3879 src_align = 32;
3880 else if (a >= 16 && c % 2 == 0)
3881 src_align = 16;
3882 }
3883 }
3884
3885 tmp = XEXP (orig_dst, 0);
3886 if (GET_CODE (tmp) == REG)
3887 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3888 else if (GET_CODE (tmp) == PLUS
3889 && GET_CODE (XEXP (tmp, 0)) == REG
3890 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3891 {
3892 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3893 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3894
3895 if (a > dst_align)
3896 {
3897 if (a >= 64 && c % 8 == 0)
3898 dst_align = 64;
3899 else if (a >= 32 && c % 4 == 0)
3900 dst_align = 32;
3901 else if (a >= 16 && c % 2 == 0)
3902 dst_align = 16;
3903 }
3904 }
3905
3906 ofs = 0;
3907 if (src_align >= 64 && bytes >= 8)
3908 {
3909 words = bytes / 8;
3910
3911 for (i = 0; i < words; ++i)
3912 data_regs[nregs + i] = gen_reg_rtx (DImode);
3913
3914 for (i = 0; i < words; ++i)
3915 emit_move_insn (data_regs[nregs + i],
3916 adjust_address (orig_src, DImode, ofs + i * 8));
3917
3918 nregs += words;
3919 bytes -= words * 8;
3920 ofs += words * 8;
3921 }
3922
3923 if (src_align >= 32 && bytes >= 4)
3924 {
3925 words = bytes / 4;
3926
3927 for (i = 0; i < words; ++i)
3928 data_regs[nregs + i] = gen_reg_rtx (SImode);
3929
3930 for (i = 0; i < words; ++i)
3931 emit_move_insn (data_regs[nregs + i],
3932 adjust_address (orig_src, SImode, ofs + i * 4));
3933
3934 nregs += words;
3935 bytes -= words * 4;
3936 ofs += words * 4;
3937 }
3938
3939 if (bytes >= 8)
3940 {
3941 words = bytes / 8;
3942
3943 for (i = 0; i < words+1; ++i)
3944 data_regs[nregs + i] = gen_reg_rtx (DImode);
3945
3946 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3947 words, ofs);
3948
3949 nregs += words;
3950 bytes -= words * 8;
3951 ofs += words * 8;
3952 }
3953
3954 if (! TARGET_BWX && bytes >= 4)
3955 {
3956 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3957 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3958 bytes -= 4;
3959 ofs += 4;
3960 }
3961
3962 if (bytes >= 2)
3963 {
3964 if (src_align >= 16)
3965 {
3966 do {
3967 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3968 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3969 bytes -= 2;
3970 ofs += 2;
3971 } while (bytes >= 2);
3972 }
3973 else if (! TARGET_BWX)
3974 {
3975 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3976 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3977 bytes -= 2;
3978 ofs += 2;
3979 }
3980 }
3981
3982 while (bytes > 0)
3983 {
3984 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3985 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3986 bytes -= 1;
3987 ofs += 1;
3988 }
3989
3990 if (nregs > ARRAY_SIZE (data_regs))
3991 abort ();
3992
3993 /* Now save it back out again. */
3994
3995 i = 0, ofs = 0;
3996
3997 /* Write out the data in whatever chunks reading the source allowed. */
3998 if (dst_align >= 64)
3999 {
4000 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4001 {
4002 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
4003 data_regs[i]);
4004 ofs += 8;
4005 i++;
4006 }
4007 }
4008
4009 if (dst_align >= 32)
4010 {
4011 /* If the source has remaining DImode regs, write them out in
4012 two pieces. */
4013 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4014 {
4015 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4016 NULL_RTX, 1, OPTAB_WIDEN);
4017
4018 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4019 gen_lowpart (SImode, data_regs[i]));
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4021 gen_lowpart (SImode, tmp));
4022 ofs += 8;
4023 i++;
4024 }
4025
4026 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4027 {
4028 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4029 data_regs[i]);
4030 ofs += 4;
4031 i++;
4032 }
4033 }
4034
4035 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4036 {
4037 /* Write out a remaining block of words using unaligned methods. */
4038
4039 for (words = 1; i + words < nregs; words++)
4040 if (GET_MODE (data_regs[i + words]) != DImode)
4041 break;
4042
4043 if (words == 1)
4044 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4045 else
4046 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4047 words, ofs);
4048
4049 i += words;
4050 ofs += words * 8;
4051 }
4052
4053 /* Due to the above, this won't be aligned. */
4054 /* ??? If we have more than one of these, consider constructing full
4055 words in registers and using alpha_expand_unaligned_store_words. */
4056 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4057 {
4058 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4059 ofs += 4;
4060 i++;
4061 }
4062
4063 if (dst_align >= 16)
4064 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4065 {
4066 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 2;
4069 }
4070 else
4071 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4072 {
4073 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4074 i++;
4075 ofs += 2;
4076 }
4077
4078 while (i < nregs && GET_MODE (data_regs[i]) == QImode)
4079 {
4080 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4081 i++;
4082 ofs += 1;
4083 }
4084
4085 if (i != nregs)
4086 abort ();
4087
4088 return 1;
4089 }
4090
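/* Expand string/block clear operations.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[2] is the alignment.  */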
4091 int
4092 alpha_expand_block_clear (rtx operands[])
4093 {
4094 rtx bytes_rtx = operands[1];
4095 rtx align_rtx = operands[2];
4096 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4097 HOST_WIDE_INT bytes = orig_bytes;
4098 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4099 HOST_WIDE_INT alignofs = 0;
4100 rtx orig_dst = operands[0];
4101 rtx tmp;
4102 int i, words, ofs = 0;
4103
4104 if (orig_bytes <= 0)
4105 return 1;
4106 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4107 return 0;
4108
4109 /* Look for stricter alignment. */
4110 tmp = XEXP (orig_dst, 0);
4111 if (GET_CODE (tmp) == REG)
4112 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4113 else if (GET_CODE (tmp) == PLUS
4114 && GET_CODE (XEXP (tmp, 0)) == REG
4115 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4116 {
4117 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4118 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4119
4120 if (a > align)
4121 {
4122 if (a >= 64)
4123 align = a, alignofs = 8 - c % 8;
4124 else if (a >= 32)
4125 align = a, alignofs = 4 - c % 4;
4126 else if (a >= 16)
4127 align = a, alignofs = 2 - c % 2;
4128 }
4129 }
4130
4131 /* Handle an unaligned prefix first. */
4132
4133 if (alignofs > 0)
4134 {
4135 #if HOST_BITS_PER_WIDE_INT >= 64
4136 /* Given that alignofs is bounded by align, the only time BWX could
4137 generate three stores is for a 7 byte fill. Prefer two individual
4138 stores over a load/mask/store sequence. */
4139 if ((!TARGET_BWX || alignofs == 7)
4140 && align >= 32
4141 && !(alignofs == 4 && bytes >= 4))
4142 {
4143 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4144 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4145 rtx mem, tmp;
4146 HOST_WIDE_INT mask;
4147
4148 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4149 set_mem_alias_set (mem, 0);
4150
4151 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4152 if (bytes < alignofs)
4153 {
4154 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4155 ofs += bytes;
4156 bytes = 0;
4157 }
4158 else
4159 {
4160 bytes -= alignofs;
4161 ofs += alignofs;
4162 }
4163 alignofs = 0;
4164
4165 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4166 NULL_RTX, 1, OPTAB_WIDEN);
4167
4168 emit_move_insn (mem, tmp);
4169 }
4170 #endif
4171
4172 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4173 {
4174 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4175 bytes -= 1;
4176 ofs += 1;
4177 alignofs -= 1;
4178 }
4179 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4180 {
4181 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4182 bytes -= 2;
4183 ofs += 2;
4184 alignofs -= 2;
4185 }
4186 if (alignofs == 4 && bytes >= 4)
4187 {
4188 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4189 bytes -= 4;
4190 ofs += 4;
4191 alignofs = 0;
4192 }
4193
4194 /* If we've not used the extra lead alignment information by now,
4195 we won't be able to. Downgrade align to match what's left over. */
4196 if (alignofs > 0)
4197 {
4198 alignofs = alignofs & -alignofs;
4199 align = MIN (align, alignofs * BITS_PER_UNIT);
4200 }
4201 }
4202
4203 /* Handle a block of contiguous long-words. */
4204
4205 if (align >= 64 && bytes >= 8)
4206 {
4207 words = bytes / 8;
4208
4209 for (i = 0; i < words; ++i)
4210 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4211 const0_rtx);
4212
4213 bytes -= words * 8;
4214 ofs += words * 8;
4215 }
4216
4217 /* If the block is large and appropriately aligned, emit a single
4218 store followed by a sequence of stq_u insns. */
4219
4220 if (align >= 32 && bytes > 16)
4221 {
4222 rtx orig_dsta;
4223
4224 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4225 bytes -= 4;
4226 ofs += 4;
4227
4228 orig_dsta = XEXP (orig_dst, 0);
4229 if (GET_CODE (orig_dsta) == LO_SUM)
4230 orig_dsta = force_reg (Pmode, orig_dsta);
4231
4232 words = bytes / 8;
4233 for (i = 0; i < words; ++i)
4234 {
4235 rtx mem
4236 = change_address (orig_dst, DImode,
4237 gen_rtx_AND (DImode,
4238 plus_constant (orig_dsta, ofs + i*8),
4239 GEN_INT (-8)));
4240 set_mem_alias_set (mem, 0);
4241 emit_move_insn (mem, const0_rtx);
4242 }
4243
4244 /* Depending on the alignment, the first stq_u may have overlapped
4245 with the initial stl, which means that the last stq_u didn't
4246 write as much as it would appear. Leave those questionable bytes
4247 unaccounted for. */
4248 bytes -= words * 8 - 4;
4249 ofs += words * 8 - 4;
4250 }
4251
4252 /* Handle a smaller block of aligned words. */
4253
4254 if ((align >= 64 && bytes == 4)
4255 || (align == 32 && bytes >= 4))
4256 {
4257 words = bytes / 4;
4258
4259 for (i = 0; i < words; ++i)
4260 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4261 const0_rtx);
4262
4263 bytes -= words * 4;
4264 ofs += words * 4;
4265 }
4266
4267 /* An unaligned block uses stq_u stores for as many as possible. */
4268
4269 if (bytes >= 8)
4270 {
4271 words = bytes / 8;
4272
4273 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4274
4275 bytes -= words * 8;
4276 ofs += words * 8;
4277 }
4278
4279 /* Next clean up any trailing pieces. */
4280
4281 #if HOST_BITS_PER_WIDE_INT >= 64
4282 /* Count the number of bits in BYTES for which aligned stores could
4283 be emitted. */
4284 words = 0;
4285 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4286 if (bytes & i)
4287 words += 1;
4288
4289 /* If we have appropriate alignment (and it wouldn't take too many
4290 instructions otherwise), mask out the bytes we need. */
4291 if (TARGET_BWX ? words > 2 : bytes > 0)
4292 {
4293 if (align >= 64)
4294 {
4295 rtx mem, tmp;
4296 HOST_WIDE_INT mask;
4297
4298 mem = adjust_address (orig_dst, DImode, ofs);
4299 set_mem_alias_set (mem, 0);
4300
4301 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4302
4303 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4304 NULL_RTX, 1, OPTAB_WIDEN);
4305
4306 emit_move_insn (mem, tmp);
4307 return 1;
4308 }
4309 else if (align >= 32 && bytes < 4)
4310 {
4311 rtx mem, tmp;
4312 HOST_WIDE_INT mask;
4313
4314 mem = adjust_address (orig_dst, SImode, ofs);
4315 set_mem_alias_set (mem, 0);
4316
4317 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4318
4319 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4320 NULL_RTX, 1, OPTAB_WIDEN);
4321
4322 emit_move_insn (mem, tmp);
4323 return 1;
4324 }
4325 }
4326 #endif
4327
4328 if (!TARGET_BWX && bytes >= 4)
4329 {
4330 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4331 bytes -= 4;
4332 ofs += 4;
4333 }
4334
4335 if (bytes >= 2)
4336 {
4337 if (align >= 16)
4338 {
4339 do {
4340 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4341 const0_rtx);
4342 bytes -= 2;
4343 ofs += 2;
4344 } while (bytes >= 2);
4345 }
4346 else if (! TARGET_BWX)
4347 {
4348 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4349 bytes -= 2;
4350 ofs += 2;
4351 }
4352 }
4353
4354 while (bytes > 0)
4355 {
4356 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4357 bytes -= 1;
4358 ofs += 1;
4359 }
4360
4361 return 1;
4362 }
4363
4364 /* Returns a mask so that zap(x, value) == x & mask. */
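/* For example, VALUE == 0x0f selects bytes 0-3 for clearing, so the
   returned mask is 0xffffffff00000000; VALUE == 0 clears nothing and
   yields an all-ones mask.  */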
4365
4366 rtx
4367 alpha_expand_zap_mask (HOST_WIDE_INT value)
4368 {
4369 rtx result;
4370 int i;
4371
4372 if (HOST_BITS_PER_WIDE_INT >= 64)
4373 {
4374 HOST_WIDE_INT mask = 0;
4375
4376 for (i = 7; i >= 0; --i)
4377 {
4378 mask <<= 8;
4379 if (!((value >> i) & 1))
4380 mask |= 0xff;
4381 }
4382
4383 result = gen_int_mode (mask, DImode);
4384 }
4385 else if (HOST_BITS_PER_WIDE_INT == 32)
4386 {
4387 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4388
4389 for (i = 7; i >= 4; --i)
4390 {
4391 mask_hi <<= 8;
4392 if (!((value >> i) & 1))
4393 mask_hi |= 0xff;
4394 }
4395
4396 for (i = 3; i >= 0; --i)
4397 {
4398 mask_lo <<= 8;
4399 if (!((value >> i) & 1))
4400 mask_lo |= 0xff;
4401 }
4402
4403 result = immed_double_const (mask_lo, mask_hi, DImode);
4404 }
4405 else
4406 abort ();
4407
4408 return result;
4409 }
4410
4411 void
4412 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4413 enum machine_mode mode,
4414 rtx op0, rtx op1, rtx op2)
4415 {
4416 op0 = gen_lowpart (mode, op0);
4417
4418 if (op1 == const0_rtx)
4419 op1 = CONST0_RTX (mode);
4420 else
4421 op1 = gen_lowpart (mode, op1);
4422
4423 if (op2 == const0_rtx)
4424 op2 = CONST0_RTX (mode);
4425 else
4426 op2 = gen_lowpart (mode, op2);
4427
4428 emit_insn ((*gen) (op0, op1, op2));
4429 }
4430 \f
4431 /* Adjust the cost of a scheduling dependency. Return the new cost of
4432 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4433
4434 static int
4435 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4436 {
4437 enum attr_type insn_type, dep_insn_type;
4438
4439 /* If the dependence is an anti-dependence, there is no cost. For an
4440 output dependence, there is sometimes a cost, but it doesn't seem
4441 worth handling those few cases. */
4442 if (REG_NOTE_KIND (link) != 0)
4443 return cost;
4444
4445 /* If we can't recognize the insns, we can't really do anything. */
4446 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4447 return cost;
4448
4449 insn_type = get_attr_type (insn);
4450 dep_insn_type = get_attr_type (dep_insn);
4451
4452 /* Bring in the user-defined memory latency. */
4453 if (dep_insn_type == TYPE_ILD
4454 || dep_insn_type == TYPE_FLD
4455 || dep_insn_type == TYPE_LDSYM)
4456 cost += alpha_memory_latency-1;
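/* E.g. with -mmemory-latency=3, a dependence on a load is charged
   two extra cycles here on top of the cost modelled by the DFA.  */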
4457
4458 /* Everything else handled in DFA bypasses now. */
4459
4460 return cost;
4461 }
4462
4463 /* The number of instructions that can be issued per cycle. */
4464
4465 static int
4466 alpha_issue_rate (void)
4467 {
4468 return (alpha_cpu == PROCESSOR_EV4 ? 2 : 4);
4469 }
4470
4471 /* How many alternative schedules to try. This should be as wide as the
4472 scheduling freedom in the DFA, but no wider. Making this value too
4473 large results in extra work for the scheduler.
4474
4475 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4476 alternative schedules. For EV5, we can choose between E0/E1 and
4477 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4478
4479 static int
4480 alpha_multipass_dfa_lookahead (void)
4481 {
4482 return (alpha_cpu == PROCESSOR_EV6 ? 4 : 2);
4483 }
4484 \f
4485 /* Machine-specific function data. */
4486
4487 struct machine_function GTY(())
4488 {
4489 /* For unicosmk. */
4490 /* List of call information words for calls from this function. */
4491 struct rtx_def *first_ciw;
4492 struct rtx_def *last_ciw;
4493 int ciw_count;
4494
4495 /* List of deferred case vectors. */
4496 struct rtx_def *addr_list;
4497
4498 /* For OSF. */
4499 const char *some_ld_name;
4500
4501 /* For TARGET_LD_BUGGY_LDGP. */
4502 struct rtx_def *gp_save_rtx;
4503 };
4504
4505 /* How to allocate a 'struct machine_function'. */
4506
4507 static struct machine_function *
4508 alpha_init_machine_status (void)
4509 {
4510 return ((struct machine_function *)
4511 ggc_alloc_cleared (sizeof (struct machine_function)));
4512 }
4513
4514 /* Functions to save and restore alpha_return_addr_rtx. */
4515
4516 /* Start the ball rolling with RETURN_ADDR_RTX. */
4517
4518 rtx
4519 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4520 {
4521 if (count != 0)
4522 return const0_rtx;
4523
4524 return get_hard_reg_initial_val (Pmode, REG_RA);
4525 }
4526
4527 /* Return or create a memory slot containing the gp value for the current
4528 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4529
4530 rtx
4531 alpha_gp_save_rtx (void)
4532 {
4533 rtx seq, m = cfun->machine->gp_save_rtx;
4534
4535 if (m == NULL)
4536 {
4537 start_sequence ();
4538
4539 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4540 m = validize_mem (m);
4541 emit_move_insn (m, pic_offset_table_rtx);
4542
4543 seq = get_insns ();
4544 end_sequence ();
4545 emit_insn_after (seq, entry_of_function ());
4546
4547 cfun->machine->gp_save_rtx = m;
4548 }
4549
4550 return m;
4551 }
4552
4553 static int
4554 alpha_ra_ever_killed (void)
4555 {
4556 rtx top;
4557
4558 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4559 return regs_ever_live[REG_RA];
4560
4561 push_topmost_sequence ();
4562 top = get_insns ();
4563 pop_topmost_sequence ();
4564
4565 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4566 }
4567
4568 \f
4569 /* Return the trap mode suffix applicable to the current
4570 instruction, or NULL. */
4571
4572 static const char *
4573 get_trap_mode_suffix (void)
4574 {
4575 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4576
4577 switch (s)
4578 {
4579 case TRAP_SUFFIX_NONE:
4580 return NULL;
4581
4582 case TRAP_SUFFIX_SU:
4583 if (alpha_fptm >= ALPHA_FPTM_SU)
4584 return "su";
4585 return NULL;
4586
4587 case TRAP_SUFFIX_SUI:
4588 if (alpha_fptm >= ALPHA_FPTM_SUI)
4589 return "sui";
4590 return NULL;
4591
4592 case TRAP_SUFFIX_V_SV:
4593 switch (alpha_fptm)
4594 {
4595 case ALPHA_FPTM_N:
4596 return NULL;
4597 case ALPHA_FPTM_U:
4598 return "v";
4599 case ALPHA_FPTM_SU:
4600 case ALPHA_FPTM_SUI:
4601 return "sv";
4602 }
4603 break;
4604
4605 case TRAP_SUFFIX_V_SV_SVI:
4606 switch (alpha_fptm)
4607 {
4608 case ALPHA_FPTM_N:
4609 return NULL;
4610 case ALPHA_FPTM_U:
4611 return "v";
4612 case ALPHA_FPTM_SU:
4613 return "sv";
4614 case ALPHA_FPTM_SUI:
4615 return "svi";
4616 }
4617 break;
4618
4619 case TRAP_SUFFIX_U_SU_SUI:
4620 switch (alpha_fptm)
4621 {
4622 case ALPHA_FPTM_N:
4623 return NULL;
4624 case ALPHA_FPTM_U:
4625 return "u";
4626 case ALPHA_FPTM_SU:
4627 return "su";
4628 case ALPHA_FPTM_SUI:
4629 return "sui";
4630 }
4631 break;
4632 }
4633 abort ();
4634 }
4635
4636 /* Return the rounding mode suffix applicable to the current
4637 instruction, or NULL. */
4638
4639 static const char *
4640 get_round_mode_suffix (void)
4641 {
4642 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4643
4644 switch (s)
4645 {
4646 case ROUND_SUFFIX_NONE:
4647 return NULL;
4648 case ROUND_SUFFIX_NORMAL:
4649 switch (alpha_fprm)
4650 {
4651 case ALPHA_FPRM_NORM:
4652 return NULL;
4653 case ALPHA_FPRM_MINF:
4654 return "m";
4655 case ALPHA_FPRM_CHOP:
4656 return "c";
4657 case ALPHA_FPRM_DYN:
4658 return "d";
4659 }
4660 break;
4661
4662 case ROUND_SUFFIX_C:
4663 return "c";
4664 }
4665 abort ();
4666 }
4667
4668 /* Locate some local-dynamic symbol still in use by this function
4669 so that we can print its name in some movdi_er_tlsldm pattern. */
4670
4671 static int
4672 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4673 {
4674 rtx x = *px;
4675
4676 if (GET_CODE (x) == SYMBOL_REF
4677 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4678 {
4679 cfun->machine->some_ld_name = XSTR (x, 0);
4680 return 1;
4681 }
4682
4683 return 0;
4684 }
4685
4686 static const char *
4687 get_some_local_dynamic_name (void)
4688 {
4689 rtx insn;
4690
4691 if (cfun->machine->some_ld_name)
4692 return cfun->machine->some_ld_name;
4693
4694 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4695 if (INSN_P (insn)
4696 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4697 return cfun->machine->some_ld_name;
4698
4699 abort ();
4700 }
4701
4702 /* Print an operand. Recognize special options, documented below. */
4703
4704 void
4705 print_operand (FILE *file, rtx x, int code)
4706 {
4707 int i;
4708
4709 switch (code)
4710 {
4711 case '~':
4712 /* Print the assembler name of the current function. */
4713 assemble_name (file, alpha_fnname);
4714 break;
4715
4716 case '&':
4717 assemble_name (file, get_some_local_dynamic_name ());
4718 break;
4719
4720 case '/':
4721 {
4722 const char *trap = get_trap_mode_suffix ();
4723 const char *round = get_round_mode_suffix ();
4724
4725 if (trap || round)
4726 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4727 (trap ? trap : ""), (round ? round : ""));
4728 break;
4729 }
4730
4731 case ',':
4732 /* Generates single precision instruction suffix. */
4733 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4734 break;
4735
4736 case '-':
4737 /* Generates double precision instruction suffix. */
4738 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4739 break;
4740
4741 case '+':
4742 /* Generates a nop after a noreturn call at the very end of the
4743 function. */
4744 if (next_real_insn (current_output_insn) == 0)
4745 fprintf (file, "\n\tnop");
4746 break;
4747
4748 case '#':
4749 if (alpha_this_literal_sequence_number == 0)
4750 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4751 fprintf (file, "%d", alpha_this_literal_sequence_number);
4752 break;
4753
4754 case '*':
4755 if (alpha_this_gpdisp_sequence_number == 0)
4756 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4757 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4758 break;
4759
4760 case 'H':
4761 if (GET_CODE (x) == HIGH)
4762 output_addr_const (file, XEXP (x, 0));
4763 else
4764 output_operand_lossage ("invalid %%H value");
4765 break;
4766
4767 case 'J':
4768 {
4769 const char *lituse;
4770
4771 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4772 {
4773 x = XVECEXP (x, 0, 0);
4774 lituse = "lituse_tlsgd";
4775 }
4776 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4777 {
4778 x = XVECEXP (x, 0, 0);
4779 lituse = "lituse_tlsldm";
4780 }
4781 else if (GET_CODE (x) == CONST_INT)
4782 lituse = "lituse_jsr";
4783 else
4784 {
4785 output_operand_lossage ("invalid %%J value");
4786 break;
4787 }
4788
4789 if (x != const0_rtx)
4790 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4791 }
4792 break;
4793
4794 case 'r':
4795 /* If this operand is the constant zero, write it as "$31". */
4796 if (GET_CODE (x) == REG)
4797 fprintf (file, "%s", reg_names[REGNO (x)]);
4798 else if (x == CONST0_RTX (GET_MODE (x)))
4799 fprintf (file, "$31");
4800 else
4801 output_operand_lossage ("invalid %%r value");
4802 break;
4803
4804 case 'R':
4805 /* Similar, but for floating-point. */
4806 if (GET_CODE (x) == REG)
4807 fprintf (file, "%s", reg_names[REGNO (x)]);
4808 else if (x == CONST0_RTX (GET_MODE (x)))
4809 fprintf (file, "$f31");
4810 else
4811 output_operand_lossage ("invalid %%R value");
4812 break;
4813
4814 case 'N':
4815 /* Write the 1's complement of a constant. */
4816 if (GET_CODE (x) != CONST_INT)
4817 output_operand_lossage ("invalid %%N value");
4818
4819 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
4820 break;
4821
4822 case 'P':
4823 /* Write 1 << C, for a constant C. */
4824 if (GET_CODE (x) != CONST_INT)
4825 output_operand_lossage ("invalid %%P value");
4826
4827 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
4828 break;
4829
4830 case 'h':
4831 /* Write the high-order 16 bits of a constant, sign-extended. */
4832 if (GET_CODE (x) != CONST_INT)
4833 output_operand_lossage ("invalid %%h value");
4834
4835 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
4836 break;
4837
4838 case 'L':
4839 /* Write the low-order 16 bits of a constant, sign-extended. */
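/* E.g. a constant of 0x12348000 prints as -32768, since bit 15 is
   treated as the sign bit of the low halfword.  */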
4840 if (GET_CODE (x) != CONST_INT)
4841 output_operand_lossage ("invalid %%L value");
4842
4843 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4844 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
4845 break;
4846
4847 case 'm':
4848 /* Write mask for ZAP insn. */
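/* E.g. a CONST_INT of 0xffffffff has nonzero bytes only in positions
   0-3, so the printed zap mask is 15.  */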
4849 if (GET_CODE (x) == CONST_DOUBLE)
4850 {
4851 HOST_WIDE_INT mask = 0;
4852 HOST_WIDE_INT value;
4853
4854 value = CONST_DOUBLE_LOW (x);
4855 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4856 i++, value >>= 8)
4857 if (value & 0xff)
4858 mask |= (1 << i);
4859
4860 value = CONST_DOUBLE_HIGH (x);
4861 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
4862 i++, value >>= 8)
4863 if (value & 0xff)
4864 mask |= (1 << (i + sizeof (int)));
4865
4866 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
4867 }
4868
4869 else if (GET_CODE (x) == CONST_INT)
4870 {
4871 HOST_WIDE_INT mask = 0, value = INTVAL (x);
4872
4873 for (i = 0; i < 8; i++, value >>= 8)
4874 if (value & 0xff)
4875 mask |= (1 << i);
4876
4877 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
4878 }
4879 else
4880 output_operand_lossage ("invalid %%m value");
4881 break;
4882
4883 case 'M':
4884 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
4885 if (GET_CODE (x) != CONST_INT
4886 || (INTVAL (x) != 8 && INTVAL (x) != 16
4887 && INTVAL (x) != 32 && INTVAL (x) != 64))
4888 output_operand_lossage ("invalid %%M value");
4889
4890 fprintf (file, "%s",
4891 (INTVAL (x) == 8 ? "b"
4892 : INTVAL (x) == 16 ? "w"
4893 : INTVAL (x) == 32 ? "l"
4894 : "q"));
4895 break;
4896
4897 case 'U':
4898 /* Similar, except do it from the mask. */
4899 if (GET_CODE (x) == CONST_INT)
4900 {
4901 HOST_WIDE_INT value = INTVAL (x);
4902
4903 if (value == 0xff)
4904 {
4905 fputc ('b', file);
4906 break;
4907 }
4908 if (value == 0xffff)
4909 {
4910 fputc ('w', file);
4911 break;
4912 }
4913 if (value == 0xffffffff)
4914 {
4915 fputc ('l', file);
4916 break;
4917 }
4918 if (value == -1)
4919 {
4920 fputc ('q', file);
4921 break;
4922 }
4923 }
4924 else if (HOST_BITS_PER_WIDE_INT == 32
4925 && GET_CODE (x) == CONST_DOUBLE
4926 && CONST_DOUBLE_LOW (x) == 0xffffffff
4927 && CONST_DOUBLE_HIGH (x) == 0)
4928 {
4929 fputc ('l', file);
4930 break;
4931 }
4932 output_operand_lossage ("invalid %%U value");
4933 break;
4934
4935 case 's':
4936 /* Write the constant value divided by 8 for little-endian mode or
4937 (56 - value) / 8 for big-endian mode. */
4938
4939 if (GET_CODE (x) != CONST_INT
4940 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
4941 ? 56
4942 : 64)
4943 || (INTVAL (x) & 7) != 0)
4944 output_operand_lossage ("invalid %%s value");
4945
4946 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
4947 WORDS_BIG_ENDIAN
4948 ? (56 - INTVAL (x)) / 8
4949 : INTVAL (x) / 8);
4950 break;
4951
4952 case 'S':
4953 /* Same, except compute (64 - c) / 8 */
4954
4955 if (GET_CODE (x) != CONST_INT
4956 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
4957 || (INTVAL (x) & 7) != 0)
4958 output_operand_lossage ("invalid %%S value");
4959
4960 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
4961 break;
4962
4963 case 't':
4964 {
4965 /* On Unicos/Mk systems: use a DEX expression if the symbol
4966 clashes with a register name. */
4967 int dex = unicosmk_need_dex (x);
4968 if (dex)
4969 fprintf (file, "DEX(%d)", dex);
4970 else
4971 output_addr_const (file, x);
4972 }
4973 break;
4974
4975 case 'C': case 'D': case 'c': case 'd':
4976 /* Write out comparison name. */
4977 {
4978 enum rtx_code c = GET_CODE (x);
4979
4980 if (!COMPARISON_P (x))
4981 output_operand_lossage ("invalid %%C value");
4982
4983 else if (code == 'D')
4984 c = reverse_condition (c);
4985 else if (code == 'c')
4986 c = swap_condition (c);
4987 else if (code == 'd')
4988 c = swap_condition (reverse_condition (c));
4989
4990 if (c == LEU)
4991 fprintf (file, "ule");
4992 else if (c == LTU)
4993 fprintf (file, "ult");
4994 else if (c == UNORDERED)
4995 fprintf (file, "un");
4996 else
4997 fprintf (file, "%s", GET_RTX_NAME (c));
4998 }
4999 break;
5000
5001 case 'E':
5002 /* Write the divide or modulus operator. */
5003 switch (GET_CODE (x))
5004 {
5005 case DIV:
5006 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5007 break;
5008 case UDIV:
5009 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5010 break;
5011 case MOD:
5012 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5013 break;
5014 case UMOD:
5015 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5016 break;
5017 default:
5018 output_operand_lossage ("invalid %%E value");
5019 break;
5020 }
5021 break;
5022
5023 case 'A':
5024 /* Write "_u" for unaligned access. */
5025 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5026 fprintf (file, "_u");
5027 break;
5028
5029 case 0:
5030 if (GET_CODE (x) == REG)
5031 fprintf (file, "%s", reg_names[REGNO (x)]);
5032 else if (GET_CODE (x) == MEM)
5033 output_address (XEXP (x, 0));
5034 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5035 {
5036 switch (XINT (XEXP (x, 0), 1))
5037 {
5038 case UNSPEC_DTPREL:
5039 case UNSPEC_TPREL:
5040 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5041 break;
5042 default:
5043 output_operand_lossage ("unknown relocation unspec");
5044 break;
5045 }
5046 }
5047 else
5048 output_addr_const (file, x);
5049 break;
5050
5051 default:
5052 output_operand_lossage ("invalid %%xn code");
5053 }
5054 }
5055
5056 void
5057 print_operand_address (FILE *file, rtx addr)
5058 {
5059 int basereg = 31;
5060 HOST_WIDE_INT offset = 0;
5061
5062 if (GET_CODE (addr) == AND)
5063 addr = XEXP (addr, 0);
5064
5065 if (GET_CODE (addr) == PLUS
5066 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5067 {
5068 offset = INTVAL (XEXP (addr, 1));
5069 addr = XEXP (addr, 0);
5070 }
5071
5072 if (GET_CODE (addr) == LO_SUM)
5073 {
5074 const char *reloc16, *reloclo;
5075 rtx op1 = XEXP (addr, 1);
5076
5077 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5078 {
5079 op1 = XEXP (op1, 0);
5080 switch (XINT (op1, 1))
5081 {
5082 case UNSPEC_DTPREL:
5083 reloc16 = NULL;
5084 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5085 break;
5086 case UNSPEC_TPREL:
5087 reloc16 = NULL;
5088 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5089 break;
5090 default:
5091 output_operand_lossage ("unknown relocation unspec");
5092 return;
5093 }
5094
5095 output_addr_const (file, XVECEXP (op1, 0, 0));
5096 }
5097 else
5098 {
5099 reloc16 = "gprel";
5100 reloclo = "gprellow";
5101 output_addr_const (file, op1);
5102 }
5103
5104 if (offset)
5105 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5106
5107 addr = XEXP (addr, 0);
5108 if (GET_CODE (addr) == REG)
5109 basereg = REGNO (addr);
5110 else if (GET_CODE (addr) == SUBREG
5111 && GET_CODE (SUBREG_REG (addr)) == REG)
5112 basereg = subreg_regno (addr);
5113 else
5114 abort ();
5115
5116 fprintf (file, "($%d)\t\t!%s", basereg,
5117 (basereg == 29 ? reloc16 : reloclo));
5118 return;
5119 }
5120
5121 if (GET_CODE (addr) == REG)
5122 basereg = REGNO (addr);
5123 else if (GET_CODE (addr) == SUBREG
5124 && GET_CODE (SUBREG_REG (addr)) == REG)
5125 basereg = subreg_regno (addr);
5126 else if (GET_CODE (addr) == CONST_INT)
5127 offset = INTVAL (addr);
5128
5129 #if TARGET_ABI_OPEN_VMS
5130 else if (GET_CODE (addr) == SYMBOL_REF)
5131 {
5132 fprintf (file, "%s", XSTR (addr, 0));
5133 return;
5134 }
5135 else if (GET_CODE (addr) == CONST
5136 && GET_CODE (XEXP (addr, 0)) == PLUS
5137 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
5138 {
5139 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5140 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5141 INTVAL (XEXP (XEXP (addr, 0), 1)));
5142 return;
5143 }
5144 #endif
5145
5146 else
5147 abort ();
5148
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5150 }
5151 \f
5152 /* Emit RTL insns to initialize the variable parts of a trampoline at
5153 TRAMP. FNADDR is an RTX for the address of the function's pure
5154 code. CXT is an RTX for the static chain value for the function.
5155
5156 The three offset parameters are for the individual template's
5157 layout. A JMPOFS < 0 indicates that the trampoline does not
5158 contain instructions at all.
5159
5160 We assume here that a function will be called many more times than
5161 its address is taken (e.g., it might be passed to qsort), so we
5162 take the trouble to initialize the "hint" field in the JMP insn.
5163 Note that the hint field is PC (new) + 4 * bits 13:0. */
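   Concretely, if the jmp instruction sits at address P and the target
   is T, the stored hint is ((T - (P + 4)) >> 2) & 0x3fff, which is
   what the (currently disabled) block below computes.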
5164
5165 void
5166 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5167 int fnofs, int cxtofs, int jmpofs)
5168 {
5169 rtx temp, temp1, addr;
5170 /* VMS really uses DImode pointers in memory at this point. */
5171 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5172
5173 #ifdef POINTERS_EXTEND_UNSIGNED
5174 fnaddr = convert_memory_address (mode, fnaddr);
5175 cxt = convert_memory_address (mode, cxt);
5176 #endif
5177
5178 /* Store function address and CXT. */
5179 addr = memory_address (mode, plus_constant (tramp, fnofs));
5180 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5181 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5182 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5183
5184 /* This has been disabled since the hint only has a 32k range, and in
5185 no existing OS is the stack within 32k of the text segment. */
5186 if (0 && jmpofs >= 0)
5187 {
5188 /* Compute hint value. */
5189 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5190 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5191 OPTAB_WIDEN);
5192 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5193 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5194 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5195 GEN_INT (0x3fff), 0);
5196
5197 /* Merge in the hint. */
5198 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5199 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5200 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5201 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5202 OPTAB_WIDEN);
5203 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5204 }
5205
5206 #ifdef ENABLE_EXECUTE_STACK
5207 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5208 0, VOIDmode, 1, tramp, Pmode);
5209 #endif
5210
5211 if (jmpofs >= 0)
5212 emit_insn (gen_imb ());
5213 }
5214 \f
5215 /* Determine where to put an argument to a function.
5216 Value is zero to push the argument on the stack,
5217 or a hard register in which to store the argument.
5218
5219 MODE is the argument's machine mode.
5220 TYPE is the data type of the argument (as a tree).
5221 This is null for libcalls where that information may
5222 not be available.
5223 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5224 the preceding args and about the function being called.
5225 NAMED is nonzero if this argument is a named parameter
5226 (otherwise it is an extra parameter matching an ellipsis).
5227
5228 On Alpha the first 6 words of args are normally in registers
5229 and the rest are pushed. */
5230
5231 rtx
5232 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5233 int named ATTRIBUTE_UNUSED)
5234 {
5235 int basereg;
5236 int num_args;
5237
5238 /* Don't get confused and pass small structures in FP registers. */
5239 if (type && AGGREGATE_TYPE_P (type))
5240 basereg = 16;
5241 else
5242 {
5243 #ifdef ENABLE_CHECKING
5244 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5245 values here. */
5246 if (COMPLEX_MODE_P (mode))
5247 abort ();
5248 #endif
5249
5250 /* Set up defaults for FP operands passed in FP registers, and
5251 integral operands passed in integer registers. */
5252 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5253 basereg = 32 + 16;
5254 else
5255 basereg = 16;
5256 }
5257
5258 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5259 the three platforms, so we can't avoid conditional compilation. */
5260 #if TARGET_ABI_OPEN_VMS
5261 {
5262 if (mode == VOIDmode)
5263 return alpha_arg_info_reg_val (cum);
5264
5265 num_args = cum.num_args;
5266 if (num_args >= 6
5267 || targetm.calls.must_pass_in_stack (mode, type))
5268 return NULL_RTX;
5269 }
5270 #elif TARGET_ABI_UNICOSMK
5271 {
5272 int size;
5273
5274 /* If this is the last argument, generate the call info word (CIW). */
5275 /* ??? We don't include the caller's line number in the CIW because
5276 I don't know how to determine it if debug info is turned off. */
5277 if (mode == VOIDmode)
5278 {
5279 int i;
5280 HOST_WIDE_INT lo;
5281 HOST_WIDE_INT hi;
5282 rtx ciw;
5283
5284 lo = 0;
5285
5286 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5287 if (cum.reg_args_type[i])
5288 lo |= (1 << (7 - i));
5289
5290 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5291 lo |= 7;
5292 else
5293 lo |= cum.num_reg_words;
5294
5295 #if HOST_BITS_PER_WIDE_INT == 32
5296 hi = (cum.num_args << 20) | cum.num_arg_words;
5297 #else
5298 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5299 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5300 hi = 0;
5301 #endif
5302 ciw = immed_double_const (lo, hi, DImode);
5303
5304 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5305 UNSPEC_UMK_LOAD_CIW);
5306 }
5307
5308 size = ALPHA_ARG_SIZE (mode, type, named);
5309 num_args = cum.num_reg_words;
5310 if (cum.force_stack
5311 || cum.num_reg_words + size > 6
5312 || targetm.calls.must_pass_in_stack (mode, type))
5313 return NULL_RTX;
5314 else if (type && TYPE_MODE (type) == BLKmode)
5315 {
5316 rtx reg1, reg2;
5317
5318 reg1 = gen_rtx_REG (DImode, num_args + 16);
5319 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5320
5321 /* The argument fits in two registers. Note that we still need to
5322 reserve a register for empty structures. */
5323 if (size == 0)
5324 return NULL_RTX;
5325 else if (size == 1)
5326 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5327 else
5328 {
5329 reg2 = gen_rtx_REG (DImode, num_args + 17);
5330 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5331 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5332 }
5333 }
5334 }
5335 #elif TARGET_ABI_OSF
5336 {
5337 if (cum >= 6)
5338 return NULL_RTX;
5339 num_args = cum;
5340
5341 /* VOID is passed as a special flag for "last argument". */
5342 if (type == void_type_node)
5343 basereg = 16;
5344 else if (targetm.calls.must_pass_in_stack (mode, type))
5345 return NULL_RTX;
5346 }
5347 #else
5348 #error Unhandled ABI
5349 #endif
5350
5351 return gen_rtx_REG (mode, num_args + basereg);
5352 }
5353
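/* Return the number of bytes of an argument that are passed in
   registers when the argument straddles the six argument registers.
   E.g. on OSF a two-word argument whose first word lands in the last
   register slot has 8 bytes passed in a register and the remainder
   passed on the stack.  */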
5354 static int
5355 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5356 enum machine_mode mode ATTRIBUTE_UNUSED,
5357 tree type ATTRIBUTE_UNUSED,
5358 bool named ATTRIBUTE_UNUSED)
5359 {
5360 int words = 0;
5361
5362 #if TARGET_ABI_OPEN_VMS
5363 if (cum->num_args < 6
5364 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5365 words = 6 - cum->num_args;
5366 #elif TARGET_ABI_UNICOSMK
5367 /* Never any split arguments. */
5368 #elif TARGET_ABI_OSF
5369 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5370 words = 6 - *cum;
5371 #else
5372 #error Unhandled ABI
5373 #endif
5374
5375 return words * UNITS_PER_WORD;
5376 }
5377
5378
5379 /* Return true if TYPE must be returned in memory, instead of in registers. */
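/* For example, any aggregate or float vector is returned in memory,
   while a _Complex double (element size 8) still comes back in
   registers; see the MODE_COMPLEX_FLOAT handling below.  */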
5380
5381 static bool
5382 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5383 {
5384 enum machine_mode mode = VOIDmode;
5385 int size;
5386
5387 if (type)
5388 {
5389 mode = TYPE_MODE (type);
5390
5391 /* All aggregates are returned in memory. */
5392 if (AGGREGATE_TYPE_P (type))
5393 return true;
5394 }
5395
5396 size = GET_MODE_SIZE (mode);
5397 switch (GET_MODE_CLASS (mode))
5398 {
5399 case MODE_VECTOR_FLOAT:
5400 /* Pass all float vectors in memory, like an aggregate. */
5401 return true;
5402
5403 case MODE_COMPLEX_FLOAT:
5404 /* We judge complex floats on the size of their element,
5405 not the size of the whole type. */
5406 size = GET_MODE_UNIT_SIZE (mode);
5407 break;
5408
5409 case MODE_INT:
5410 case MODE_FLOAT:
5411 case MODE_COMPLEX_INT:
5412 case MODE_VECTOR_INT:
5413 break;
5414
5415 default:
5416 /* ??? We get called on all sorts of random stuff from
5417 aggregate_value_p. We can't abort, but it's not clear
5418 what's safe to return. Pretend it's a struct I guess. */
5419 return true;
5420 }
5421
5422 /* Otherwise types must fit in one register. */
5423 return size > UNITS_PER_WORD;
5424 }
5425
5426 /* Return true if TYPE should be passed by invisible reference. */
5427
5428 static bool
5429 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5430 enum machine_mode mode,
5431 tree type ATTRIBUTE_UNUSED,
5432 bool named ATTRIBUTE_UNUSED)
5433 {
5434 return mode == TFmode || mode == TCmode;
5435 }
5436
5437 /* Define how to find the value returned by a function. VALTYPE is the
5438 data type of the value (as a tree). If the precise function being
5439 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5440 MODE is set instead of VALTYPE for libcalls.
5441
5442 On Alpha the value is found in $0 for integer functions and
5443 $f0 for floating-point functions. */
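   A complex float value is returned as a PARALLEL, with the real part
   in $f0 and the imaginary part in $f1, as constructed below.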
5444
5445 rtx
5446 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5447 enum machine_mode mode)
5448 {
5449 unsigned int regnum, dummy;
5450 enum mode_class class;
5451
5452 #ifdef ENABLE_CHECKING
5453 if (valtype && alpha_return_in_memory (valtype, func))
5454 abort ();
5455 #endif
5456
5457 if (valtype)
5458 mode = TYPE_MODE (valtype);
5459
5460 class = GET_MODE_CLASS (mode);
5461 switch (class)
5462 {
5463 case MODE_INT:
5464 PROMOTE_MODE (mode, dummy, valtype);
5465 /* FALLTHRU */
5466
5467 case MODE_COMPLEX_INT:
5468 case MODE_VECTOR_INT:
5469 regnum = 0;
5470 break;
5471
5472 case MODE_FLOAT:
5473 regnum = 32;
5474 break;
5475
5476 case MODE_COMPLEX_FLOAT:
5477 {
5478 enum machine_mode cmode = GET_MODE_INNER (mode);
5479
5480 return gen_rtx_PARALLEL
5481 (VOIDmode,
5482 gen_rtvec (2,
5483 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5484 const0_rtx),
5485 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5486 GEN_INT (GET_MODE_SIZE (cmode)))));
5487 }
5488
5489 default:
5490 abort ();
5491 }
5492
5493 return gen_rtx_REG (mode, regnum);
5494 }
5495
5496 /* TCmode complex values are passed by invisible reference. We
5497 should not split these values. */
5498
5499 static bool
5500 alpha_split_complex_arg (tree type)
5501 {
5502 return TYPE_MODE (type) != TCmode;
5503 }
5504
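/* Build the va_list type used on OSF.  The record laid out below is
   roughly equivalent to

       struct __va_list_tag {
         void *__base;    -- start of the saved-argument area
         int __offset;    -- byte offset of the next argument
       };

   plus a dummy field that exists only to silence alignment warnings.
   VMS and Unicos/Mk simply use a plain pointer instead.  */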
5505 static tree
5506 alpha_build_builtin_va_list (void)
5507 {
5508 tree base, ofs, space, record, type_decl;
5509
5510 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5511 return ptr_type_node;
5512
5513 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5514 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5515 TREE_CHAIN (record) = type_decl;
5516 TYPE_NAME (record) = type_decl;
5517
5518 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5519
5520 /* Dummy field to prevent alignment warnings. */
5521 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5522 DECL_FIELD_CONTEXT (space) = record;
5523 DECL_ARTIFICIAL (space) = 1;
5524 DECL_IGNORED_P (space) = 1;
5525
5526 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5527 integer_type_node);
5528 DECL_FIELD_CONTEXT (ofs) = record;
5529 TREE_CHAIN (ofs) = space;
5530
5531 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5532 ptr_type_node);
5533 DECL_FIELD_CONTEXT (base) = record;
5534 TREE_CHAIN (base) = ofs;
5535
5536 TYPE_FIELDS (record) = base;
5537 layout_type (record);
5538
5539 return record;
5540 }
5541
5542 /* Perform any actions needed for a function that is receiving a
5543 variable number of arguments. */
5544
5545 static void
5546 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
5547 enum machine_mode mode ATTRIBUTE_UNUSED,
5548 tree type ATTRIBUTE_UNUSED,
5549 int *pretend_size, int no_rtl)
5550 {
5551 #if TARGET_ABI_UNICOSMK
5552 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5553 arguments on the stack. Unfortunately, it doesn't always store the first
5554 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5555 with stdargs as we always have at least one named argument there. */
5556 int num_reg_words = pcum->num_reg_words;
5557 if (num_reg_words < 6)
5558 {
5559 if (!no_rtl)
5560 {
5561 emit_insn (gen_umk_mismatch_args (GEN_INT (num_reg_words + 1)));
5562 emit_insn (gen_arg_home_umk ());
5563 }
5564 *pretend_size = 0;
5565 }
5566 #elif TARGET_ABI_OPEN_VMS
5567 /* For VMS, we allocate space for all 6 arg registers plus a count.
5568
5569 However, if NO registers need to be saved, don't allocate any space.
5570 This is not only because we won't need the space, but because AP
5571 includes the current_pretend_args_size and we don't want to mess up
5572 any ap-relative addresses already made. */
5573 if (pcum->num_args < 6)
5574 {
5575 if (!no_rtl)
5576 {
5577 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5578 emit_insn (gen_arg_home ());
5579 }
5580 *pretend_size = 7 * UNITS_PER_WORD;
5581 }
5582 #else
5583 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5584 only push those that are remaining. However, if NO registers need to
5585 be saved, don't allocate any space. This is not only because we won't
5586 need the space, but because AP includes the current_pretend_args_size
5587 and we don't want to mess up any ap-relative addresses already made.
5588
5589 If we are not to use the floating-point registers, save the integer
5590 registers where we would put the floating-point registers. This is
5591 not the most efficient way to implement varargs with just one register
5592 class, but it isn't worth doing anything more efficient in this rare
5593 case. */
5594 CUMULATIVE_ARGS cum = *pcum;
5595
5596 if (cum >= 6)
5597 return;
5598
5599 if (!no_rtl)
5600 {
5601 int set = get_varargs_alias_set ();
5602 rtx tmp;
5603
5604 tmp = gen_rtx_MEM (BLKmode,
5605 plus_constant (virtual_incoming_args_rtx,
5606 (cum + 6) * UNITS_PER_WORD));
5607 set_mem_alias_set (tmp, set);
5608 move_block_from_reg (16 + cum, tmp, 6 - cum);
5609
5610 tmp = gen_rtx_MEM (BLKmode,
5611 plus_constant (virtual_incoming_args_rtx,
5612 cum * UNITS_PER_WORD));
5613 set_mem_alias_set (tmp, set);
5614 move_block_from_reg (16 + (TARGET_FPREGS ? 32 : 0) + cum, tmp,
5615 6 - cum);
5616 }
5617 *pretend_size = 12 * UNITS_PER_WORD;
5618 #endif
5619 }
5620
5621 void
5622 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
5623 {
5624 HOST_WIDE_INT offset;
5625 tree t, offset_field, base_field;
5626
5627 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
5628 return;
5629
5630 if (TARGET_ABI_UNICOSMK)
5631 std_expand_builtin_va_start (valist, nextarg);
5632
5633 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
5634 up by 48, storing fp arg registers in the first 48 bytes, and the
5635 integer arg registers in the next 48 bytes. This is only done,
5636 however, if any integer registers need to be stored.
5637
5638 If no integer registers need be stored, then we must subtract 48
5639 in order to account for the integer arg registers which are counted
5640 in argsize above, but which are not actually stored on the stack.
5641 Must further be careful here about structures straddling the last
5642 integer argument register; that futzes with pretend_args_size,
5643 which changes the meaning of AP. */
5644
5645 if (NUM_ARGS <= 6)
5646 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
5647 else
5648 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
5649
5650 if (TARGET_ABI_OPEN_VMS)
5651 {
5652 nextarg = plus_constant (nextarg, offset);
5653 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
5654 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
5655 make_tree (ptr_type_node, nextarg));
5656 TREE_SIDE_EFFECTS (t) = 1;
5657
5658 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5659 }
5660 else
5661 {
5662 base_field = TYPE_FIELDS (TREE_TYPE (valist));
5663 offset_field = TREE_CHAIN (base_field);
5664
5665 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5666 valist, base_field, NULL_TREE);
5667 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5668 valist, offset_field, NULL_TREE);
5669
5670 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
5671 t = build (PLUS_EXPR, ptr_type_node, t,
5672 build_int_cst (NULL_TREE, offset));
5673 t = build (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
5674 TREE_SIDE_EFFECTS (t) = 1;
5675 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5676
5677 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
5678 t = build (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
5679 TREE_SIDE_EFFECTS (t) = 1;
5680 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
5681 }
5682 }
5683
5684 static tree
5685 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
5686 {
5687 tree type_size, ptr_type, addend, t, addr, internal_post;
5688
5689 /* If the type could not be passed in registers, skip the block
5690 reserved for the registers. */
5691 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
5692 {
5693 t = build_int_cst (TREE_TYPE (offset), 6*8);
5694 t = build (MODIFY_EXPR, TREE_TYPE (offset), offset,
5695 build (MAX_EXPR, TREE_TYPE (offset), offset, t));
5696 gimplify_and_add (t, pre_p);
5697 }
5698
5699 addend = offset;
5700 ptr_type = build_pointer_type (type);
5701
5702 if (TREE_CODE (type) == COMPLEX_TYPE)
5703 {
5704 tree real_part, imag_part, real_temp;
5705
5706 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5707 offset, pre_p);
5708
5709 /* Copy the value into a new temporary, lest the formal temporary
5710 be reused out from under us. */
5711 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
5712
5713 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
5714 offset, pre_p);
5715
5716 return build (COMPLEX_EXPR, type, real_temp, imag_part);
5717 }
5718 else if (TREE_CODE (type) == REAL_TYPE)
5719 {
5720 tree fpaddend, cond, fourtyeight;
5721
5722 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
5723 fpaddend = fold (build (MINUS_EXPR, TREE_TYPE (addend),
5724 addend, fourtyeight));
5725 cond = fold (build (LT_EXPR, boolean_type_node, addend, fourtyeight));
5726 addend = fold (build (COND_EXPR, TREE_TYPE (addend), cond,
5727 fpaddend, addend));
5728 }
5729
5730 /* Build the final address and force that value into a temporary. */
5731 addr = build (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
5732 fold_convert (ptr_type, addend));
5733 internal_post = NULL;
5734 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
5735 append_to_statement_list (internal_post, pre_p);
5736
5737 /* Update the offset field. */
5738 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
5739 if (type_size == NULL || TREE_OVERFLOW (type_size))
5740 t = size_zero_node;
5741 else
5742 {
5743 t = size_binop (PLUS_EXPR, type_size, size_int (7));
5744 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
5745 t = size_binop (MULT_EXPR, t, size_int (8));
5746 }
5747 t = fold_convert (TREE_TYPE (offset), t);
5748 t = build (MODIFY_EXPR, void_type_node, offset,
5749 build (PLUS_EXPR, TREE_TYPE (offset), offset, t));
5750 gimplify_and_add (t, pre_p);
5751
5752 return build_fold_indirect_ref (addr);
5753 }
5754
5755 static tree
5756 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5757 {
5758 tree offset_field, base_field, offset, base, t, r;
5759 bool indirect;
5760
5761 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5762 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5763
5764 base_field = TYPE_FIELDS (va_list_type_node);
5765 offset_field = TREE_CHAIN (base_field);
5766 base_field = build (COMPONENT_REF, TREE_TYPE (base_field),
5767 valist, base_field, NULL_TREE);
5768 offset_field = build (COMPONENT_REF, TREE_TYPE (offset_field),
5769 valist, offset_field, NULL_TREE);
5770
5771 /* Pull the fields of the structure out into temporaries. Since we never
5772 modify the base field, we can use a formal temporary. Sign-extend the
5773 offset field so that it's the proper width for pointer arithmetic. */
5774 base = get_formal_tmp_var (base_field, pre_p);
5775
5776 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
5777 offset = get_initialized_tmp_var (t, pre_p, NULL);
5778
5779 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
5780 if (indirect)
5781 type = build_pointer_type (type);
5782
5783 /* Find the value. Note that this will be a stable indirection, or
5784 a composite of stable indirections in the case of complex. */
5785 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
5786
5787 /* Stuff the offset temporary back into its field. */
5788 t = build (MODIFY_EXPR, void_type_node, offset_field,
5789 fold_convert (TREE_TYPE (offset_field), offset));
5790 gimplify_and_add (t, pre_p);
5791
5792 if (indirect)
5793 r = build_fold_indirect_ref (r);
5794
5795 return r;
5796 }
5797 \f
5798 /* Builtins. */
5799
5800 enum alpha_builtin
5801 {
5802 ALPHA_BUILTIN_CMPBGE,
5803 ALPHA_BUILTIN_EXTBL,
5804 ALPHA_BUILTIN_EXTWL,
5805 ALPHA_BUILTIN_EXTLL,
5806 ALPHA_BUILTIN_EXTQL,
5807 ALPHA_BUILTIN_EXTWH,
5808 ALPHA_BUILTIN_EXTLH,
5809 ALPHA_BUILTIN_EXTQH,
5810 ALPHA_BUILTIN_INSBL,
5811 ALPHA_BUILTIN_INSWL,
5812 ALPHA_BUILTIN_INSLL,
5813 ALPHA_BUILTIN_INSQL,
5814 ALPHA_BUILTIN_INSWH,
5815 ALPHA_BUILTIN_INSLH,
5816 ALPHA_BUILTIN_INSQH,
5817 ALPHA_BUILTIN_MSKBL,
5818 ALPHA_BUILTIN_MSKWL,
5819 ALPHA_BUILTIN_MSKLL,
5820 ALPHA_BUILTIN_MSKQL,
5821 ALPHA_BUILTIN_MSKWH,
5822 ALPHA_BUILTIN_MSKLH,
5823 ALPHA_BUILTIN_MSKQH,
5824 ALPHA_BUILTIN_UMULH,
5825 ALPHA_BUILTIN_ZAP,
5826 ALPHA_BUILTIN_ZAPNOT,
5827 ALPHA_BUILTIN_AMASK,
5828 ALPHA_BUILTIN_IMPLVER,
5829 ALPHA_BUILTIN_RPCC,
5830 ALPHA_BUILTIN_THREAD_POINTER,
5831 ALPHA_BUILTIN_SET_THREAD_POINTER,
5832
5833 /* TARGET_MAX */
5834 ALPHA_BUILTIN_MINUB8,
5835 ALPHA_BUILTIN_MINSB8,
5836 ALPHA_BUILTIN_MINUW4,
5837 ALPHA_BUILTIN_MINSW4,
5838 ALPHA_BUILTIN_MAXUB8,
5839 ALPHA_BUILTIN_MAXSB8,
5840 ALPHA_BUILTIN_MAXUW4,
5841 ALPHA_BUILTIN_MAXSW4,
5842 ALPHA_BUILTIN_PERR,
5843 ALPHA_BUILTIN_PKLB,
5844 ALPHA_BUILTIN_PKWB,
5845 ALPHA_BUILTIN_UNPKBL,
5846 ALPHA_BUILTIN_UNPKBW,
5847
5848 /* TARGET_CIX */
5849 ALPHA_BUILTIN_CTTZ,
5850 ALPHA_BUILTIN_CTLZ,
5851 ALPHA_BUILTIN_CTPOP,
5852
5853 ALPHA_BUILTIN_max
5854 };
5855
5856 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
5857 CODE_FOR_builtin_cmpbge,
5858 CODE_FOR_builtin_extbl,
5859 CODE_FOR_builtin_extwl,
5860 CODE_FOR_builtin_extll,
5861 CODE_FOR_builtin_extql,
5862 CODE_FOR_builtin_extwh,
5863 CODE_FOR_builtin_extlh,
5864 CODE_FOR_builtin_extqh,
5865 CODE_FOR_builtin_insbl,
5866 CODE_FOR_builtin_inswl,
5867 CODE_FOR_builtin_insll,
5868 CODE_FOR_builtin_insql,
5869 CODE_FOR_builtin_inswh,
5870 CODE_FOR_builtin_inslh,
5871 CODE_FOR_builtin_insqh,
5872 CODE_FOR_builtin_mskbl,
5873 CODE_FOR_builtin_mskwl,
5874 CODE_FOR_builtin_mskll,
5875 CODE_FOR_builtin_mskql,
5876 CODE_FOR_builtin_mskwh,
5877 CODE_FOR_builtin_msklh,
5878 CODE_FOR_builtin_mskqh,
5879 CODE_FOR_umuldi3_highpart,
5880 CODE_FOR_builtin_zap,
5881 CODE_FOR_builtin_zapnot,
5882 CODE_FOR_builtin_amask,
5883 CODE_FOR_builtin_implver,
5884 CODE_FOR_builtin_rpcc,
5885 CODE_FOR_load_tp,
5886 CODE_FOR_set_tp,
5887
5888 /* TARGET_MAX */
5889 CODE_FOR_builtin_minub8,
5890 CODE_FOR_builtin_minsb8,
5891 CODE_FOR_builtin_minuw4,
5892 CODE_FOR_builtin_minsw4,
5893 CODE_FOR_builtin_maxub8,
5894 CODE_FOR_builtin_maxsb8,
5895 CODE_FOR_builtin_maxuw4,
5896 CODE_FOR_builtin_maxsw4,
5897 CODE_FOR_builtin_perr,
5898 CODE_FOR_builtin_pklb,
5899 CODE_FOR_builtin_pkwb,
5900 CODE_FOR_builtin_unpkbl,
5901 CODE_FOR_builtin_unpkbw,
5902
5903 /* TARGET_CIX */
5904 CODE_FOR_ctzdi2,
5905 CODE_FOR_clzdi2,
5906 CODE_FOR_popcountdi2
5907 };
5908
5909 struct alpha_builtin_def
5910 {
5911 const char *name;
5912 enum alpha_builtin code;
5913 unsigned int target_mask;
5914 bool is_const;
5915 };
5916
5917 static struct alpha_builtin_def const zero_arg_builtins[] = {
5918 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
5919 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
5920 };
5921
5922 static struct alpha_builtin_def const one_arg_builtins[] = {
5923 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
5924 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
5925 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
5926 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
5927 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
5928 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
5929 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
5930 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
5931 };
5932
5933 static struct alpha_builtin_def const two_arg_builtins[] = {
5934 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
5935 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
5936 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
5937 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
5938 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
5939 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
5940 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
5941 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
5942 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
5943 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
5944 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
5945 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
5946 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
5947 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
5948 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
5949 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
5950 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
5951 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
5952 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
5953 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
5954 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
5955 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
5956 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
5957 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
5958 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
5959 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
5960 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
5961 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
5962 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
5963 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
5964 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
5965 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
5966 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
5967 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
5968 };
5969
5970 static GTY(()) tree alpha_v8qi_u;
5971 static GTY(()) tree alpha_v8qi_s;
5972 static GTY(()) tree alpha_v4hi_u;
5973 static GTY(()) tree alpha_v4hi_s;
5974
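/* Register the Alpha builtin functions with the front end.  Each table
   entry is registered only when the target_mask bits it requires are all
   present in target_flags.  The vector types used by the constant folders
   below are created here as well.  */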
5975 static void
5976 alpha_init_builtins (void)
5977 {
5978 const struct alpha_builtin_def *p;
5979 tree ftype, attrs[2];
5980 size_t i;
5981
5982 attrs[0] = tree_cons (get_identifier ("nothrow"), NULL, NULL);
5983 attrs[1] = tree_cons (get_identifier ("const"), NULL, attrs[0]);
5984
5985 ftype = build_function_type (long_integer_type_node, void_list_node);
5986
5987 p = zero_arg_builtins;
5988 for (i = 0; i < ARRAY_SIZE (zero_arg_builtins); ++i, ++p)
5989 if ((target_flags & p->target_mask) == p->target_mask)
5990 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
5991 NULL, attrs[p->is_const]);
5992
5993 ftype = build_function_type_list (long_integer_type_node,
5994 long_integer_type_node, NULL_TREE);
5995
5996 p = one_arg_builtins;
5997 for (i = 0; i < ARRAY_SIZE (one_arg_builtins); ++i, ++p)
5998 if ((target_flags & p->target_mask) == p->target_mask)
5999 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6000 NULL, attrs[p->is_const]);
6001
6002 ftype = build_function_type_list (long_integer_type_node,
6003 long_integer_type_node,
6004 long_integer_type_node, NULL_TREE);
6005
6006 p = two_arg_builtins;
6007 for (i = 0; i < ARRAY_SIZE (two_arg_builtins); ++i, ++p)
6008 if ((target_flags & p->target_mask) == p->target_mask)
6009 lang_hooks.builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6010 NULL, attrs[p->is_const]);
6011
6012 ftype = build_function_type (ptr_type_node, void_list_node);
6013 lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
6014 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6015 NULL, attrs[0]);
6016
6017 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6018 lang_hooks.builtin_function ("__builtin_set_thread_pointer", ftype,
6019 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6020 NULL, attrs[0]);
6021
6022 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6023 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6024 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6025 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6026 }
6027
6028 /* Expand an expression EXP that calls a built-in function,
6029 with result going to TARGET if that's convenient
6030 (and in mode MODE if that's convenient).
6031 SUBTARGET may be used as the target for computing one of EXP's operands.
6032 IGNORE is nonzero if the value is to be ignored. */
6033
6034 static rtx
6035 alpha_expand_builtin (tree exp, rtx target,
6036 rtx subtarget ATTRIBUTE_UNUSED,
6037 enum machine_mode mode ATTRIBUTE_UNUSED,
6038 int ignore ATTRIBUTE_UNUSED)
6039 {
6040 #define MAX_ARGS 2
6041
6042 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
6043 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6044 tree arglist = TREE_OPERAND (exp, 1);
6045 enum insn_code icode;
6046 rtx op[MAX_ARGS], pat;
6047 int arity;
6048 bool nonvoid;
6049
6050 if (fcode >= ALPHA_BUILTIN_max)
6051 internal_error ("bad builtin fcode");
6052 icode = code_for_builtin[fcode];
6053 if (icode == 0)
6054 internal_error ("bad builtin fcode");
6055
6056 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6057
6058 for (arglist = TREE_OPERAND (exp, 1), arity = 0;
6059 arglist;
6060 arglist = TREE_CHAIN (arglist), arity++)
6061 {
6062 const struct insn_operand_data *insn_op;
6063
6064 tree arg = TREE_VALUE (arglist);
6065 if (arg == error_mark_node)
6066 return NULL_RTX;
6067       if (arity >= MAX_ARGS)
6068 return NULL_RTX;
6069
6070 insn_op = &insn_data[icode].operand[arity + nonvoid];
6071
6072 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6073
6074 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6075 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6076 }
6077
6078 if (nonvoid)
6079 {
6080 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6081 if (!target
6082 || GET_MODE (target) != tmode
6083 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6084 target = gen_reg_rtx (tmode);
6085 }
6086
6087 switch (arity)
6088 {
6089 case 0:
6090 pat = GEN_FCN (icode) (target);
6091 break;
6092 case 1:
6093 if (nonvoid)
6094 pat = GEN_FCN (icode) (target, op[0]);
6095 else
6096 pat = GEN_FCN (icode) (op[0]);
6097 break;
6098 case 2:
6099 pat = GEN_FCN (icode) (target, op[0], op[1]);
6100 break;
6101 default:
6102 abort ();
6103 }
6104 if (!pat)
6105 return NULL_RTX;
6106 emit_insn (pat);
6107
6108 if (nonvoid)
6109 return target;
6110 else
6111 return const0_rtx;
6112 }
6113
6114
6115 /* Several bits below assume HWI >= 64 bits. This should be enforced
6116 by config.gcc. */
6117 #if HOST_BITS_PER_WIDE_INT < 64
6118 # error "HOST_WIDE_INT too small"
6119 #endif
6120
6121 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6122 with an 8 bit output vector. OPINT contains the integer operands; bit N
6123 of OP_CONST is set if OPINT[N] is valid. */
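/* For example, with both operands constant,
     __builtin_alpha_cmpbge (0x0102030405060708, 0x0807060504030201)
   folds to 0x0f, since only the four low-order byte pairs satisfy the
   unsigned >= test.  The instruction is commonly used to scan for zero
   bytes: __builtin_alpha_cmpbge (0, x) has one result bit set for every
   zero byte of x.  */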
6124
6125 static tree
6126 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6127 {
6128 if (op_const == 3)
6129 {
6130 int i, val;
6131 for (i = 0, val = 0; i < 8; ++i)
6132 {
6133 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6134 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6135 if (c0 >= c1)
6136 val |= 1 << i;
6137 }
6138 return build_int_cst (long_integer_type_node, val);
6139 }
6140   else if (op_const == 2 && opint[1] == 0)
6141 return build_int_cst (long_integer_type_node, 0xff);
6142 return NULL;
6143 }
6144
6145 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6146 specialized form of an AND operation. Other byte manipulation instructions
6147 are defined in terms of this instruction, so this is also used as a
6148 subroutine for other builtins.
6149
6150 OP contains the tree operands; OPINT contains the extracted integer values.
6151    Bit N of OP_CONST is set if OPINT[N] is valid.  OP may be null if only
6152 OPINT may be considered. */
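/* For example, if the mask operand is the constant 0x0f, the fold keeps
   the four low-order bytes and produces X & 0xffffffff; if the value
   operand is also constant, the AND is evaluated directly.  */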
6153
6154 static tree
6155 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6156 long op_const)
6157 {
6158 if (op_const & 2)
6159 {
6160 unsigned HOST_WIDE_INT mask = 0;
6161 int i;
6162
6163 for (i = 0; i < 8; ++i)
6164 if ((opint[1] >> i) & 1)
6165 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6166
6167 if (op_const & 1)
6168 return build_int_cst (long_integer_type_node, opint[0] & mask);
6169
6170 if (op)
6171 return fold (build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6172 build_int_cst (long_integer_type_node, mask)));
6173 }
6174 else if ((op_const & 1) && opint[0] == 0)
6175 return build_int_cst (long_integer_type_node, 0);
6176 return NULL;
6177 }
6178
6179 /* Fold the builtins for the EXT family of instructions. */
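/* The fold shifts a constant first operand by the byte offset taken from
   the second operand (right for the low forms, left for the high forms)
   and then reduces to a ZAPNOT with BYTEMASK.  A zero offset needs no
   shift, so the original tree operand can be reused.  */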
6180
6181 static tree
6182 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6183 long op_const, unsigned HOST_WIDE_INT bytemask,
6184 bool is_high)
6185 {
6186 long zap_const = 2;
6187 tree *zap_op = NULL;
6188
6189 if (op_const & 2)
6190 {
6191 unsigned HOST_WIDE_INT loc;
6192
6193 loc = opint[1] & 7;
6194 if (BYTES_BIG_ENDIAN)
6195 loc ^= 7;
6196 loc *= 8;
6197
6198 if (loc != 0)
6199 {
6200 if (op_const & 1)
6201 {
6202 unsigned HOST_WIDE_INT temp = opint[0];
6203 if (is_high)
6204 temp <<= loc;
6205 else
6206 temp >>= loc;
6207 opint[0] = temp;
6208 zap_const = 3;
6209 }
6210 }
6211 else
6212 zap_op = op;
6213 }
6214
6215 opint[1] = bytemask;
6216 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6217 }
6218
6219 /* Fold the builtins for the INS family of instructions. */
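/* The fold moves a constant first operand into position according to the
   byte offset in the second operand, shifts BYTEMASK along with it, and
   again reduces to a ZAPNOT.  Inserting a zero value always yields zero,
   which is handled up front.  */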
6220
6221 static tree
6222 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6223 long op_const, unsigned HOST_WIDE_INT bytemask,
6224 bool is_high)
6225 {
6226 if ((op_const & 1) && opint[0] == 0)
6227 return build_int_cst (long_integer_type_node, 0);
6228
6229 if (op_const & 2)
6230 {
6231 unsigned HOST_WIDE_INT temp, loc, byteloc;
6232 tree *zap_op = NULL;
6233
6234 loc = opint[1] & 7;
6235 if (BYTES_BIG_ENDIAN)
6236 loc ^= 7;
6237 bytemask <<= loc;
6238
6239 temp = opint[0];
6240 if (is_high)
6241 {
6242 byteloc = (64 - (loc * 8)) & 0x3f;
6243 if (byteloc == 0)
6244 zap_op = op;
6245 else
6246 temp >>= byteloc;
6247 bytemask >>= 8;
6248 }
6249 else
6250 {
6251 byteloc = loc * 8;
6252 if (byteloc == 0)
6253 zap_op = op;
6254 else
6255 temp <<= byteloc;
6256 }
6257
6258 opint[0] = temp;
6259 opint[1] = bytemask;
6260 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6261 }
6262
6263 return NULL;
6264 }
6265
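/* Fold the builtins for the MSK family of instructions.  These clear the
   bytes selected by the byte offset and BYTEMASK, so they reduce to a
   ZAPNOT with the complemented byte mask.  */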
6266 static tree
6267 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6268 long op_const, unsigned HOST_WIDE_INT bytemask,
6269 bool is_high)
6270 {
6271 if (op_const & 2)
6272 {
6273 unsigned HOST_WIDE_INT loc;
6274
6275 loc = opint[1] & 7;
6276 if (BYTES_BIG_ENDIAN)
6277 loc ^= 7;
6278 bytemask <<= loc;
6279
6280 if (is_high)
6281 bytemask >>= 8;
6282
6283 opint[1] = bytemask ^ 0xff;
6284 }
6285
6286 return alpha_fold_builtin_zapnot (op, opint, op_const);
6287 }
6288
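/* Fold the builtin for the UMULH instruction, which computes the high
   64 bits of the unsigned 128-bit product of its operands.  For example,
   __builtin_alpha_umulh (1ul << 32, 1ul << 32) is 1.  */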
6289 static tree
6290 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6291 {
6292 switch (op_const)
6293 {
6294 case 3:
6295 {
6296 unsigned HOST_WIDE_INT l;
6297 HOST_WIDE_INT h;
6298
6299 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6300
6301 #if HOST_BITS_PER_WIDE_INT > 64
6302 # error fixme
6303 #endif
6304
6305 return build_int_cst (long_integer_type_node, h);
6306 }
6307
6308 case 1:
6309 opint[1] = opint[0];
6310 /* FALLTHRU */
6311 case 2:
6312 /* Note that (X*1) >> 64 == 0. */
6313 if (opint[1] == 0 || opint[1] == 1)
6314 return build_int_cst (long_integer_type_node, 0);
6315 break;
6316 }
6317 return NULL;
6318 }
6319
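/* Fold one of the MAX extension's elementwise min/max builtins by
   converting both operands to the vector type VTYPE, applying CODE
   (MIN_EXPR or MAX_EXPR), and converting the result back to a 64-bit
   integer.  */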
6320 static tree
6321 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6322 {
6323 tree op0 = fold_convert (vtype, op[0]);
6324 tree op1 = fold_convert (vtype, op[1]);
6325 tree val = fold (build2 (code, vtype, op0, op1));
6326 return fold_convert (long_integer_type_node, val);
6327 }
6328
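/* Fold the builtin for the PERR instruction, which sums the absolute
   differences of the eight unsigned byte pairs of its operands.  */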
6329 static tree
6330 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6331 {
6332 unsigned HOST_WIDE_INT temp = 0;
6333 int i;
6334
6335 if (op_const != 3)
6336 return NULL;
6337
6338 for (i = 0; i < 8; ++i)
6339 {
6340 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6341 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6342 if (a >= b)
6343 temp += a - b;
6344 else
6345 temp += b - a;
6346 }
6347
6348 return build_int_cst (long_integer_type_node, temp);
6349 }
6350
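/* Fold the builtins for the PKLB/PKWB pack and UNPKBL/UNPKBW unpack
   instructions.  PKLB keeps byte 0 and moves byte 4 down to byte 1;
   PKWB moves bytes 2, 4 and 6 down to bytes 1, 2 and 3; the unpack
   folds perform the inverse byte moves.  The remaining result bytes
   are cleared.  */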
6351 static tree
6352 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6353 {
6354 unsigned HOST_WIDE_INT temp;
6355
6356 if (op_const == 0)
6357 return NULL;
6358
6359 temp = opint[0] & 0xff;
6360 temp |= (opint[0] >> 24) & 0xff00;
6361
6362 return build_int_cst (long_integer_type_node, temp);
6363 }
6364
6365 static tree
6366 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6367 {
6368 unsigned HOST_WIDE_INT temp;
6369
6370 if (op_const == 0)
6371 return NULL;
6372
6373 temp = opint[0] & 0xff;
6374 temp |= (opint[0] >> 8) & 0xff00;
6375 temp |= (opint[0] >> 16) & 0xff0000;
6376 temp |= (opint[0] >> 24) & 0xff000000;
6377
6378 return build_int_cst (long_integer_type_node, temp);
6379 }
6380
6381 static tree
6382 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6383 {
6384 unsigned HOST_WIDE_INT temp;
6385
6386 if (op_const == 0)
6387 return NULL;
6388
6389 temp = opint[0] & 0xff;
6390 temp |= (opint[0] & 0xff00) << 24;
6391
6392 return build_int_cst (long_integer_type_node, temp);
6393 }
6394
6395 static tree
6396 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6397 {
6398 unsigned HOST_WIDE_INT temp;
6399
6400 if (op_const == 0)
6401 return NULL;
6402
6403 temp = opint[0] & 0xff;
6404 temp |= (opint[0] & 0x0000ff00) << 8;
6405 temp |= (opint[0] & 0x00ff0000) << 16;
6406 temp |= (opint[0] & 0xff000000) << 24;
6407
6408 return build_int_cst (long_integer_type_node, temp);
6409 }
6410
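/* Fold the builtins for the CIX count instructions: CTTZ (count trailing
   zeros), CTLZ (count leading zeros) and CTPOP (population count), each
   operating on a 64-bit value.  For example, cttz (0x50) is 4, ctlz (1)
   is 63 and ctpop (0xff) is 8; a zero operand yields 64 for cttz and
   ctlz, and 0 for ctpop.  */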
6411 static tree
6412 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6413 {
6414 unsigned HOST_WIDE_INT temp;
6415
6416 if (op_const == 0)
6417 return NULL;
6418
6419 if (opint[0] == 0)
6420 temp = 64;
6421 else
6422 temp = exact_log2 (opint[0] & -opint[0]);
6423
6424 return build_int_cst (long_integer_type_node, temp);
6425 }
6426
6427 static tree
6428 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6429 {
6430 unsigned HOST_WIDE_INT temp;
6431
6432 if (op_const == 0)
6433 return NULL;
6434
6435 if (opint[0] == 0)
6436 temp = 64;
6437 else
6438 temp = 64 - floor_log2 (opint[0]) - 1;
6439
6440 return build_int_cst (long_integer_type_node, temp);
6441 }
6442
6443 static tree
6444 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6445 {
6446 unsigned HOST_WIDE_INT temp, op;
6447
6448 if (op_const == 0)
6449 return NULL;
6450
6451 op = opint[0];
6452 temp = 0;
6453 while (op)
6454 temp++, op &= op - 1;
6455
6456 return build_int_cst (long_integer_type_node, temp);
6457 }
6458
6459 /* Fold one of our builtin functions. */
6460
6461 static tree
6462 alpha_fold_builtin (tree exp, bool ignore ATTRIBUTE_UNUSED)
6463 {
6464 tree fndecl = get_callee_fndecl (exp);
6465 tree op[MAX_ARGS], t;
6466 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6467 long op_const = 0, arity = 0;
6468
6469 for (t = TREE_OPERAND (exp, 1); t ; t = TREE_CHAIN (t), ++arity)
6470 {
6471 tree arg = TREE_VALUE (t);
6472 if (arg == error_mark_node)
6473 return NULL;
6474 if (arity >= MAX_ARGS)
6475 return NULL;
6476
6477 op[arity] = arg;
6478 opint[arity] = 0;
6479 if (TREE_CODE (arg) == INTEGER_CST)
6480 {
6481 op_const |= 1L << arity;
6482 opint[arity] = int_cst_value (arg);
6483 }
6484 }
6485
6486 switch (DECL_FUNCTION_CODE (fndecl))
6487 {
6488 case ALPHA_BUILTIN_CMPBGE:
6489 return alpha_fold_builtin_cmpbge (opint, op_const);
6490
6491 case ALPHA_BUILTIN_EXTBL:
6492 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6493 case ALPHA_BUILTIN_EXTWL:
6494 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6495 case ALPHA_BUILTIN_EXTLL:
6496 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6497 case ALPHA_BUILTIN_EXTQL:
6498 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6499 case ALPHA_BUILTIN_EXTWH:
6500 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6501 case ALPHA_BUILTIN_EXTLH:
6502 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6503 case ALPHA_BUILTIN_EXTQH:
6504 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6505
6506 case ALPHA_BUILTIN_INSBL:
6507 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6508 case ALPHA_BUILTIN_INSWL:
6509 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6510 case ALPHA_BUILTIN_INSLL:
6511 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6512 case ALPHA_BUILTIN_INSQL:
6513 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6514 case ALPHA_BUILTIN_INSWH:
6515 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6516 case ALPHA_BUILTIN_INSLH:
6517 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6518 case ALPHA_BUILTIN_INSQH:
6519 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6520
6521 case ALPHA_BUILTIN_MSKBL:
6522 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6523 case ALPHA_BUILTIN_MSKWL:
6524 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6525 case ALPHA_BUILTIN_MSKLL:
6526 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6527 case ALPHA_BUILTIN_MSKQL:
6528 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6529 case ALPHA_BUILTIN_MSKWH:
6530 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6531 case ALPHA_BUILTIN_MSKLH:
6532 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6533 case ALPHA_BUILTIN_MSKQH:
6534 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6535
6536 case ALPHA_BUILTIN_UMULH:
6537 return alpha_fold_builtin_umulh (opint, op_const);
6538
6539 case ALPHA_BUILTIN_ZAP:
6540 opint[1] ^= 0xff;
6541 /* FALLTHRU */
6542 case ALPHA_BUILTIN_ZAPNOT:
6543 return alpha_fold_builtin_zapnot (op, opint, op_const);
6544
6545 case ALPHA_BUILTIN_MINUB8:
6546 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6547 case ALPHA_BUILTIN_MINSB8:
6548 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
6549 case ALPHA_BUILTIN_MINUW4:
6550 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
6551 case ALPHA_BUILTIN_MINSW4:
6552 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
6553 case ALPHA_BUILTIN_MAXUB8:
6554 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
6555 case ALPHA_BUILTIN_MAXSB8:
6556 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
6557 case ALPHA_BUILTIN_MAXUW4:
6558 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
6559 case ALPHA_BUILTIN_MAXSW4:
6560 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
6561
6562 case ALPHA_BUILTIN_PERR:
6563 return alpha_fold_builtin_perr (opint, op_const);
6564 case ALPHA_BUILTIN_PKLB:
6565 return alpha_fold_builtin_pklb (opint, op_const);
6566 case ALPHA_BUILTIN_PKWB:
6567 return alpha_fold_builtin_pkwb (opint, op_const);
6568 case ALPHA_BUILTIN_UNPKBL:
6569 return alpha_fold_builtin_unpkbl (opint, op_const);
6570 case ALPHA_BUILTIN_UNPKBW:
6571 return alpha_fold_builtin_unpkbw (opint, op_const);
6572
6573 case ALPHA_BUILTIN_CTTZ:
6574 return alpha_fold_builtin_cttz (opint, op_const);
6575 case ALPHA_BUILTIN_CTLZ:
6576 return alpha_fold_builtin_ctlz (opint, op_const);
6577 case ALPHA_BUILTIN_CTPOP:
6578 return alpha_fold_builtin_ctpop (opint, op_const);
6579
6580 case ALPHA_BUILTIN_AMASK:
6581 case ALPHA_BUILTIN_IMPLVER:
6582 case ALPHA_BUILTIN_RPCC:
6583 case ALPHA_BUILTIN_THREAD_POINTER:
6584 case ALPHA_BUILTIN_SET_THREAD_POINTER:
6585 /* None of these are foldable at compile-time. */
6586 default:
6587 return NULL;
6588 }
6589 }
6590 \f
6591 /* This page contains routines that are used to determine what the function
6592 prologue and epilogue code will do and write them out. */
6593
6594 /* Compute the size of the save area in the stack. */
6595
6596 /* These variables are used for communication between the following functions.
6597 They indicate various things about the current function being compiled
6598 that are used to tell what kind of prologue, epilogue and procedure
6599 descriptor to generate. */
6600
6601 /* Nonzero if we need a stack procedure. */
6602 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
6603 static enum alpha_procedure_types alpha_procedure_type;
6604
6605 /* Register number (either FP or SP) that is used to unwind the frame. */
6606 static int vms_unwind_regno;
6607
6608 /* Register number used to save FP. We need not have one for RA since
6609 we don't modify it for register procedures. This is only defined
6610 for register frame procedures. */
6611 static int vms_save_fp_regno;
6612
6613 /* Register number used to reference objects off our PV. */
6614 static int vms_base_regno;
6615
6616 /* Compute register masks for saved registers. */
6617
6618 static void
6619 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
6620 {
6621 unsigned long imask = 0;
6622 unsigned long fmask = 0;
6623 unsigned int i;
6624
6625 /* When outputting a thunk, we don't have valid register life info,
6626 but assemble_start_function wants to output .frame and .mask
6627 directives. */
6628 if (current_function_is_thunk)
6629 {
6630 *imaskP = 0;
6631 *fmaskP = 0;
6632 return;
6633 }
6634
6635 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
6636 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
6637
6638 /* One for every register we have to save. */
6639 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6640 if (! fixed_regs[i] && ! call_used_regs[i]
6641 && regs_ever_live[i] && i != REG_RA
6642 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
6643 {
6644 if (i < 32)
6645 imask |= (1UL << i);
6646 else
6647 fmask |= (1UL << (i - 32));
6648 }
6649
6650 /* We need to restore these for the handler. */
6651 if (current_function_calls_eh_return)
6652 {
6653 for (i = 0; ; ++i)
6654 {
6655 unsigned regno = EH_RETURN_DATA_REGNO (i);
6656 if (regno == INVALID_REGNUM)
6657 break;
6658 imask |= 1UL << regno;
6659 }
6660 }
6661
6662 /* If any register spilled, then spill the return address also. */
6663 /* ??? This is required by the Digital stack unwind specification
6664 and isn't needed if we're doing Dwarf2 unwinding. */
6665 if (imask || fmask || alpha_ra_ever_killed ())
6666 imask |= (1UL << REG_RA);
6667
6668 *imaskP = imask;
6669 *fmaskP = fmask;
6670 }
6671
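/* Return the size in bytes of the register save area for the current
   function.  As a side effect, decide what kind of procedure this is
   and, for VMS, which registers are used for the frame base, FP save
   and unwinding.  */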
6672 int
6673 alpha_sa_size (void)
6674 {
6675 unsigned long mask[2];
6676 int sa_size = 0;
6677 int i, j;
6678
6679 alpha_sa_mask (&mask[0], &mask[1]);
6680
6681 if (TARGET_ABI_UNICOSMK)
6682 {
6683 if (mask[0] || mask[1])
6684 sa_size = 14;
6685 }
6686 else
6687 {
6688 for (j = 0; j < 2; ++j)
6689 for (i = 0; i < 32; ++i)
6690 if ((mask[j] >> i) & 1)
6691 sa_size++;
6692 }
6693
6694 if (TARGET_ABI_UNICOSMK)
6695 {
6696 /* We might not need to generate a frame if we don't make any calls
6697 (including calls to __T3E_MISMATCH if this is a vararg function),
6698 don't have any local variables which require stack slots, don't
6699 use alloca and have not determined that we need a frame for other
6700 reasons. */
6701
6702 alpha_procedure_type
6703 = (sa_size || get_frame_size() != 0
6704 || current_function_outgoing_args_size
6705 || current_function_stdarg || current_function_calls_alloca
6706 || frame_pointer_needed)
6707 ? PT_STACK : PT_REGISTER;
6708
6709 /* Always reserve space for saving callee-saved registers if we
6710 need a frame as required by the calling convention. */
6711 if (alpha_procedure_type == PT_STACK)
6712 sa_size = 14;
6713 }
6714 else if (TARGET_ABI_OPEN_VMS)
6715 {
6716 /* Start by assuming we can use a register procedure if we don't
6717 make any calls (REG_RA not used) or need to save any
6718 registers and a stack procedure if we do. */
6719 if ((mask[0] >> REG_RA) & 1)
6720 alpha_procedure_type = PT_STACK;
6721 else if (get_frame_size() != 0)
6722 alpha_procedure_type = PT_REGISTER;
6723 else
6724 alpha_procedure_type = PT_NULL;
6725
6726 /* Don't reserve space for saving FP & RA yet. Do that later after we've
6727 made the final decision on stack procedure vs register procedure. */
6728 if (alpha_procedure_type == PT_STACK)
6729 sa_size -= 2;
6730
6731 /* Decide whether to refer to objects off our PV via FP or PV.
6732 If we need FP for something else or if we receive a nonlocal
6733 goto (which expects PV to contain the value), we must use PV.
6734 Otherwise, start by assuming we can use FP. */
6735
6736 vms_base_regno
6737 = (frame_pointer_needed
6738 || current_function_has_nonlocal_label
6739 || alpha_procedure_type == PT_STACK
6740 || current_function_outgoing_args_size)
6741 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
6742
6743 /* If we want to copy PV into FP, we need to find some register
6744 in which to save FP. */
6745
6746 vms_save_fp_regno = -1;
6747 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
6748 for (i = 0; i < 32; i++)
6749 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
6750 vms_save_fp_regno = i;
6751
6752 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
6753 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
6754 else if (alpha_procedure_type == PT_NULL)
6755 vms_base_regno = REG_PV;
6756
6757 /* Stack unwinding should be done via FP unless we use it for PV. */
6758 vms_unwind_regno = (vms_base_regno == REG_PV
6759 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
6760
6761 /* If this is a stack procedure, allow space for saving FP and RA. */
6762 if (alpha_procedure_type == PT_STACK)
6763 sa_size += 2;
6764 }
6765 else
6766 {
6767 /* Our size must be even (multiple of 16 bytes). */
6768 if (sa_size & 1)
6769 sa_size++;
6770 }
6771
6772 return sa_size * 8;
6773 }
6774
6775 /* Define the offset between two registers, one to be eliminated,
6776 and the other its replacement, at the start of a routine. */
6777
6778 HOST_WIDE_INT
6779 alpha_initial_elimination_offset (unsigned int from,
6780 unsigned int to ATTRIBUTE_UNUSED)
6781 {
6782 HOST_WIDE_INT ret;
6783
6784 ret = alpha_sa_size ();
6785 ret += ALPHA_ROUND (current_function_outgoing_args_size);
6786
6787 if (from == FRAME_POINTER_REGNUM)
6788 ;
6789 else if (from == ARG_POINTER_REGNUM)
6790 ret += (ALPHA_ROUND (get_frame_size ()
6791 + current_function_pretend_args_size)
6792 - current_function_pretend_args_size);
6793 else
6794 abort ();
6795
6796 return ret;
6797 }
6798
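/* Return the number of bytes reserved for saving the procedure value
   register: 8 if the current function is a stack procedure, else 0.  */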
6799 int
6800 alpha_pv_save_size (void)
6801 {
6802 alpha_sa_size ();
6803 return alpha_procedure_type == PT_STACK ? 8 : 0;
6804 }
6805
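/* Return nonzero if stack unwinding for the current function is done
   through the hard frame pointer rather than the stack pointer.  */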
6806 int
6807 alpha_using_fp (void)
6808 {
6809 alpha_sa_size ();
6810 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
6811 }
6812
6813 #if TARGET_ABI_OPEN_VMS
6814
6815 const struct attribute_spec vms_attribute_table[] =
6816 {
6817 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
6818 { "overlaid", 0, 0, true, false, false, NULL },
6819 { "global", 0, 0, true, false, false, NULL },
6820 { "initialize", 0, 0, true, false, false, NULL },
6821 { NULL, 0, 0, false, false, false, NULL }
6822 };
6823
6824 #endif
6825
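/* Callback for for_each_rtx; return nonzero if *PX is a LO_SUM whose
   base is the gp (pic_offset_table_rtx).  */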
6826 static int
6827 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
6828 {
6829 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
6830 }
6831
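/* Return nonzero if INSN's pattern contains a LO_SUM computed from
   the gp.  */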
6832 int
6833 alpha_find_lo_sum_using_gp (rtx insn)
6834 {
6835 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
6836 }
6837
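/* Return nonzero if the current function needs the gp set up in its
   prologue.  */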
6838 static int
6839 alpha_does_function_need_gp (void)
6840 {
6841 rtx insn;
6842
6843 /* The GP being variable is an OSF abi thing. */
6844 if (! TARGET_ABI_OSF)
6845 return 0;
6846
6847 /* We need the gp to load the address of __mcount. */
6848 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
6849 return 1;
6850
6851 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
6852 if (current_function_is_thunk)
6853 return 1;
6854
6855 /* The nonlocal receiver pattern assumes that the gp is valid for
6856 the nested function. Reasonable because it's almost always set
6857 correctly already. For the cases where that's wrong, make sure
6858 the nested function loads its gp on entry. */
6859 if (current_function_has_nonlocal_goto)
6860 return 1;
6861
6862 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
6863 Even if we are a static function, we still need to do this in case
6864 our address is taken and passed to something like qsort. */
6865
6866 push_topmost_sequence ();
6867 insn = get_insns ();
6868 pop_topmost_sequence ();
6869
6870 for (; insn; insn = NEXT_INSN (insn))
6871 if (INSN_P (insn)
6872 && GET_CODE (PATTERN (insn)) != USE
6873 && GET_CODE (PATTERN (insn)) != CLOBBER
6874 && get_attr_usegp (insn))
6875 return 1;
6876
6877 return 0;
6878 }
6879
6880 \f
6881 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
6882 sequences. */
6883
6884 static rtx
6885 set_frame_related_p (void)
6886 {
6887 rtx seq = get_insns ();
6888 rtx insn;
6889
6890 end_sequence ();
6891
6892 if (!seq)
6893 return NULL_RTX;
6894
6895 if (INSN_P (seq))
6896 {
6897 insn = seq;
6898 while (insn != NULL_RTX)
6899 {
6900 RTX_FRAME_RELATED_P (insn) = 1;
6901 insn = NEXT_INSN (insn);
6902 }
6903 seq = emit_insn (seq);
6904 }
6905 else
6906 {
6907 seq = emit_insn (seq);
6908 RTX_FRAME_RELATED_P (seq) = 1;
6909 }
6910 return seq;
6911 }
6912
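/* Wrap the emission of EXP in its own sequence so that every insn it
   generates is marked as frame related for the unwind info.  */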
6913 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
6914
6915 /* Generates a store with the proper unwind info attached. VALUE is
6916 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
6917 contains SP+FRAME_BIAS, and that is the unwind info that should be
6918 generated. If FRAME_REG != VALUE, then VALUE is being stored on
6919 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
6920
6921 static void
6922 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
6923 HOST_WIDE_INT base_ofs, rtx frame_reg)
6924 {
6925 rtx addr, mem, insn;
6926
6927 addr = plus_constant (base_reg, base_ofs);
6928 mem = gen_rtx_MEM (DImode, addr);
6929 set_mem_alias_set (mem, alpha_sr_alias_set);
6930
6931 insn = emit_move_insn (mem, value);
6932 RTX_FRAME_RELATED_P (insn) = 1;
6933
6934 if (frame_bias || value != frame_reg)
6935 {
6936 if (frame_bias)
6937 {
6938 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
6939 mem = gen_rtx_MEM (DImode, addr);
6940 }
6941
6942 REG_NOTES (insn)
6943 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6944 gen_rtx_SET (VOIDmode, mem, frame_reg),
6945 REG_NOTES (insn));
6946 }
6947 }
6948
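/* Save hard register REGNO at BASE_REG + BASE_OFS, emitting the unwind
   information described above for emit_frame_store_1.  */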
6949 static void
6950 emit_frame_store (unsigned int regno, rtx base_reg,
6951 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
6952 {
6953 rtx reg = gen_rtx_REG (DImode, regno);
6954 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
6955 }
6956
6957 /* Write function prologue. */
6958
6959 /* On vms we have two kinds of functions:
6960
6961 - stack frame (PROC_STACK)
6962         these are 'normal' functions with local variables and which
6963         call other functions
6964 - register frame (PROC_REGISTER)
6965 keeps all data in registers, needs no stack
6966
6967 We must pass this to the assembler so it can generate the
6968    proper pdsc (procedure descriptor).
6969 This is done with the '.pdesc' command.
6970
6971    On non-VMS targets, we don't really differentiate between the two, as we can
6972 simply allocate stack without saving registers. */
6973
6974 void
6975 alpha_expand_prologue (void)
6976 {
6977 /* Registers to save. */
6978 unsigned long imask = 0;
6979 unsigned long fmask = 0;
6980 /* Stack space needed for pushing registers clobbered by us. */
6981 HOST_WIDE_INT sa_size;
6982 /* Complete stack size needed. */
6983 HOST_WIDE_INT frame_size;
6984 /* Offset from base reg to register save area. */
6985 HOST_WIDE_INT reg_offset;
6986 rtx sa_reg;
6987 int i;
6988
6989 sa_size = alpha_sa_size ();
6990
6991 frame_size = get_frame_size ();
6992 if (TARGET_ABI_OPEN_VMS)
6993 frame_size = ALPHA_ROUND (sa_size
6994 + (alpha_procedure_type == PT_STACK ? 8 : 0)
6995 + frame_size
6996 + current_function_pretend_args_size);
6997 else if (TARGET_ABI_UNICOSMK)
6998 /* We have to allocate space for the DSIB if we generate a frame. */
6999 frame_size = ALPHA_ROUND (sa_size
7000 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7001 + ALPHA_ROUND (frame_size
7002 + current_function_outgoing_args_size);
7003 else
7004 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7005 + sa_size
7006 + ALPHA_ROUND (frame_size
7007 + current_function_pretend_args_size));
7008
7009 if (TARGET_ABI_OPEN_VMS)
7010 reg_offset = 8;
7011 else
7012 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7013
7014 alpha_sa_mask (&imask, &fmask);
7015
7016 /* Emit an insn to reload GP, if needed. */
7017 if (TARGET_ABI_OSF)
7018 {
7019 alpha_function_needs_gp = alpha_does_function_need_gp ();
7020 if (alpha_function_needs_gp)
7021 emit_insn (gen_prologue_ldgp ());
7022 }
7023
7024 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7025 the call to mcount ourselves, rather than having the linker do it
7026 magically in response to -pg. Since _mcount has special linkage,
7027 don't represent the call as a call. */
7028 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7029 emit_insn (gen_prologue_mcount ());
7030
7031 if (TARGET_ABI_UNICOSMK)
7032 unicosmk_gen_dsib (&imask);
7033
7034 /* Adjust the stack by the frame size. If the frame size is > 4096
7035 bytes, we need to be sure we probe somewhere in the first and last
7036 4096 bytes (we can probably get away without the latter test) and
7037 every 8192 bytes in between. If the frame size is > 32768, we
7038 do this in a loop. Otherwise, we generate the explicit probe
7039 instructions.
7040
7041 Note that we are only allowed to adjust sp once in the prologue. */
7042
7043 if (frame_size <= 32768)
7044 {
7045 if (frame_size > 4096)
7046 {
7047 int probed = 4096;
7048
7049 do
7050 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7051 ? -probed + 64
7052 : -probed)));
7053 while ((probed += 8192) < frame_size);
7054
7055 /* We only have to do this probe if we aren't saving registers. */
7056 if (sa_size == 0 && probed + 4096 < frame_size)
7057 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7058 }
7059
7060 if (frame_size != 0)
7061 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7062 GEN_INT (TARGET_ABI_UNICOSMK
7063 ? -frame_size + 64
7064 : -frame_size))));
7065 }
7066 else
7067 {
7068 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7069 number of 8192 byte blocks to probe. We then probe each block
7070 in the loop and then set SP to the proper location. If the
7071 amount remaining is > 4096, we have to do one more probe if we
7072 are not saving any registers. */
7073
7074 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7075 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7076 rtx ptr = gen_rtx_REG (DImode, 22);
7077 rtx count = gen_rtx_REG (DImode, 23);
7078 rtx seq;
7079
7080 emit_move_insn (count, GEN_INT (blocks));
7081 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7082 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7083
7084 /* Because of the difficulty in emitting a new basic block this
7085 late in the compilation, generate the loop as a single insn. */
7086 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7087
7088 if (leftover > 4096 && sa_size == 0)
7089 {
7090 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7091 MEM_VOLATILE_P (last) = 1;
7092 emit_move_insn (last, const0_rtx);
7093 }
7094
7095 if (TARGET_ABI_WINDOWS_NT)
7096 {
7097 /* For NT stack unwind (done by 'reverse execution'), it's
7098 not OK to take the result of a loop, even though the value
7099 is already in ptr, so we reload it via a single operation
7100          and subtract it from sp.
7101
7102 Yes, that's correct -- we have to reload the whole constant
7103 into a temporary via ldah+lda then subtract from sp. */
7104
7105 HOST_WIDE_INT lo, hi;
7106 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7107 hi = frame_size - lo;
7108
7109 emit_move_insn (ptr, GEN_INT (hi));
7110 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7111 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7112 ptr));
7113 }
7114 else
7115 {
7116 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7117 GEN_INT (-leftover)));
7118 }
7119
7120 /* This alternative is special, because the DWARF code cannot
7121 possibly intuit through the loop above. So we invent this
7122        note for it to look at instead.  */
7123 RTX_FRAME_RELATED_P (seq) = 1;
7124 REG_NOTES (seq)
7125 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7126 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7127 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7128 GEN_INT (TARGET_ABI_UNICOSMK
7129 ? -frame_size + 64
7130 : -frame_size))),
7131 REG_NOTES (seq));
7132 }
7133
7134 if (!TARGET_ABI_UNICOSMK)
7135 {
7136 HOST_WIDE_INT sa_bias = 0;
7137
7138 /* Cope with very large offsets to the register save area. */
7139 sa_reg = stack_pointer_rtx;
7140 if (reg_offset + sa_size > 0x8000)
7141 {
7142 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7143 rtx sa_bias_rtx;
7144
7145 if (low + sa_size <= 0x8000)
7146 sa_bias = reg_offset - low, reg_offset = low;
7147 else
7148 sa_bias = reg_offset, reg_offset = 0;
7149
7150 sa_reg = gen_rtx_REG (DImode, 24);
7151 sa_bias_rtx = GEN_INT (sa_bias);
7152
7153 if (add_operand (sa_bias_rtx, DImode))
7154 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7155 else
7156 {
7157 emit_move_insn (sa_reg, sa_bias_rtx);
7158 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7159 }
7160 }
7161
7162 /* Save regs in stack order. Beginning with VMS PV. */
7163 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7164 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7165
7166 /* Save register RA next. */
7167 if (imask & (1UL << REG_RA))
7168 {
7169 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7170 imask &= ~(1UL << REG_RA);
7171 reg_offset += 8;
7172 }
7173
7174 /* Now save any other registers required to be saved. */
7175 for (i = 0; i < 31; i++)
7176 if (imask & (1UL << i))
7177 {
7178 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7179 reg_offset += 8;
7180 }
7181
7182 for (i = 0; i < 31; i++)
7183 if (fmask & (1UL << i))
7184 {
7185 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7186 reg_offset += 8;
7187 }
7188 }
7189 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7190 {
7191 /* The standard frame on the T3E includes space for saving registers.
7192 We just have to use it. We don't have to save the return address and
7193 the old frame pointer here - they are saved in the DSIB. */
7194
7195 reg_offset = -56;
7196 for (i = 9; i < 15; i++)
7197 if (imask & (1UL << i))
7198 {
7199 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7200 reg_offset -= 8;
7201 }
7202 for (i = 2; i < 10; i++)
7203 if (fmask & (1UL << i))
7204 {
7205 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7206 reg_offset -= 8;
7207 }
7208 }
7209
7210 if (TARGET_ABI_OPEN_VMS)
7211 {
7212 if (alpha_procedure_type == PT_REGISTER)
7213 /* Register frame procedures save the fp.
7214 ?? Ought to have a dwarf2 save for this. */
7215 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7216 hard_frame_pointer_rtx);
7217
7218 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7219 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7220 gen_rtx_REG (DImode, REG_PV)));
7221
7222 if (alpha_procedure_type != PT_NULL
7223 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7224 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7225
7226 /* If we have to allocate space for outgoing args, do it now. */
7227 if (current_function_outgoing_args_size != 0)
7228 {
7229 rtx seq
7230 = emit_move_insn (stack_pointer_rtx,
7231 plus_constant
7232 (hard_frame_pointer_rtx,
7233 - (ALPHA_ROUND
7234 (current_function_outgoing_args_size))));
7235
7236 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7237 if ! frame_pointer_needed. Setting the bit will change the CFA
7238 computation rule to use sp again, which would be wrong if we had
7239 frame_pointer_needed, as this means sp might move unpredictably
7240 later on.
7241
7242 Also, note that
7243 frame_pointer_needed
7244 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7245 and
7246 current_function_outgoing_args_size != 0
7247 => alpha_procedure_type != PT_NULL,
7248
7249 so when we are not setting the bit here, we are guaranteed to
7250 have emitted an FRP frame pointer update just before. */
7251 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7252 }
7253 }
7254 else if (!TARGET_ABI_UNICOSMK)
7255 {
7256 /* If we need a frame pointer, set it from the stack pointer. */
7257 if (frame_pointer_needed)
7258 {
7259 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7260 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7261 else
7262 /* This must always be the last instruction in the
7263 prologue, thus we emit a special move + clobber. */
7264 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7265 stack_pointer_rtx, sa_reg)));
7266 }
7267 }
7268
7269 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7270 the prologue, for exception handling reasons, we cannot do this for
7271 any insn that might fault. We could prevent this for mems with a
7272 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7273 have to prevent all such scheduling with a blockage.
7274
7275 Linux, on the other hand, never bothered to implement OSF/1's
7276 exception handling, and so doesn't care about such things. Anyone
7277 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7278
7279 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7280 emit_insn (gen_blockage ());
7281 }
7282
7283 /* Count the number of .file directives, so that .loc is up to date. */
7284 int num_source_filenames = 0;
7285
7286 /* Output the textual info surrounding the prologue. */
7287
7288 void
7289 alpha_start_function (FILE *file, const char *fnname,
7290 tree decl ATTRIBUTE_UNUSED)
7291 {
7292 unsigned long imask = 0;
7293 unsigned long fmask = 0;
7294 /* Stack space needed for pushing registers clobbered by us. */
7295 HOST_WIDE_INT sa_size;
7296 /* Complete stack size needed. */
7297 unsigned HOST_WIDE_INT frame_size;
7298 /* Offset from base reg to register save area. */
7299 HOST_WIDE_INT reg_offset;
7300 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7301 int i;
7302
7303 /* Don't emit an extern directive for functions defined in the same file. */
7304 if (TARGET_ABI_UNICOSMK)
7305 {
7306 tree name_tree;
7307 name_tree = get_identifier (fnname);
7308 TREE_ASM_WRITTEN (name_tree) = 1;
7309 }
7310
7311 alpha_fnname = fnname;
7312 sa_size = alpha_sa_size ();
7313
7314 frame_size = get_frame_size ();
7315 if (TARGET_ABI_OPEN_VMS)
7316 frame_size = ALPHA_ROUND (sa_size
7317 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7318 + frame_size
7319 + current_function_pretend_args_size);
7320 else if (TARGET_ABI_UNICOSMK)
7321 frame_size = ALPHA_ROUND (sa_size
7322 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7323 + ALPHA_ROUND (frame_size
7324 + current_function_outgoing_args_size);
7325 else
7326 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7327 + sa_size
7328 + ALPHA_ROUND (frame_size
7329 + current_function_pretend_args_size));
7330
7331 if (TARGET_ABI_OPEN_VMS)
7332 reg_offset = 8;
7333 else
7334 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7335
7336 alpha_sa_mask (&imask, &fmask);
7337
7338 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7339 We have to do that before the .ent directive as we cannot switch
7340 files within procedures with native ecoff because line numbers are
7341 linked to procedure descriptors.
7342 Outputting the lineno helps debugging of one line functions as they
7343 would otherwise get no line number at all. Please note that we would
7344 like to put out last_linenum from final.c, but it is not accessible. */
7345
7346 if (write_symbols == SDB_DEBUG)
7347 {
7348 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7349 ASM_OUTPUT_SOURCE_FILENAME (file,
7350 DECL_SOURCE_FILE (current_function_decl));
7351 #endif
7352 #ifdef SDB_OUTPUT_SOURCE_LINE
7353 if (debug_info_level != DINFO_LEVEL_TERSE)
7354 SDB_OUTPUT_SOURCE_LINE (file,
7355 DECL_SOURCE_LINE (current_function_decl));
7356 #endif
7357 }
7358
7359 /* Issue function start and label. */
7360 if (TARGET_ABI_OPEN_VMS
7361 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7362 {
7363 fputs ("\t.ent ", file);
7364 assemble_name (file, fnname);
7365 putc ('\n', file);
7366
7367 /* If the function needs GP, we'll write the "..ng" label there.
7368 Otherwise, do it here. */
7369 if (TARGET_ABI_OSF
7370 && ! alpha_function_needs_gp
7371 && ! current_function_is_thunk)
7372 {
7373 putc ('$', file);
7374 assemble_name (file, fnname);
7375 fputs ("..ng:\n", file);
7376 }
7377 }
7378
7379 strcpy (entry_label, fnname);
7380 if (TARGET_ABI_OPEN_VMS)
7381 strcat (entry_label, "..en");
7382
7383 /* For public functions, the label must be globalized by appending an
7384 additional colon. */
7385 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7386 strcat (entry_label, ":");
7387
7388 ASM_OUTPUT_LABEL (file, entry_label);
7389 inside_function = TRUE;
7390
7391 if (TARGET_ABI_OPEN_VMS)
7392 fprintf (file, "\t.base $%d\n", vms_base_regno);
7393
7394 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7395 && !flag_inhibit_size_directive)
7396 {
7397 /* Set flags in procedure descriptor to request IEEE-conformant
7398 math-library routines. The value we set it to is PDSC_EXC_IEEE
7399 (/usr/include/pdsc.h). */
7400 fputs ("\t.eflag 48\n", file);
7401 }
7402
7403 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7404 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7405 alpha_arg_offset = -frame_size + 48;
7406
7407   /* Describe our frame.  If the frame size does not fit in a signed
7408      32-bit integer, print it as zero to avoid an assembler error.  We won't be
7409 properly describing such a frame, but that's the best we can do. */
7410 if (TARGET_ABI_UNICOSMK)
7411 ;
7412 else if (TARGET_ABI_OPEN_VMS)
7413 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7414 HOST_WIDE_INT_PRINT_DEC "\n",
7415 vms_unwind_regno,
7416 frame_size >= (1UL << 31) ? 0 : frame_size,
7417 reg_offset);
7418 else if (!flag_inhibit_size_directive)
7419 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7420 (frame_pointer_needed
7421 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7422 frame_size >= (1UL << 31) ? 0 : frame_size,
7423 current_function_pretend_args_size);
7424
7425 /* Describe which registers were spilled. */
7426 if (TARGET_ABI_UNICOSMK)
7427 ;
7428 else if (TARGET_ABI_OPEN_VMS)
7429 {
7430 if (imask)
7431 /* ??? Does VMS care if mask contains ra? The old code didn't
7432           set it, so I don't set it here.  */
7433 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7434 if (fmask)
7435 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7436 if (alpha_procedure_type == PT_REGISTER)
7437 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7438 }
7439 else if (!flag_inhibit_size_directive)
7440 {
7441 if (imask)
7442 {
7443 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7444 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7445
7446 for (i = 0; i < 32; ++i)
7447 if (imask & (1UL << i))
7448 reg_offset += 8;
7449 }
7450
7451 if (fmask)
7452 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7453 frame_size >= (1UL << 31) ? 0 : reg_offset - frame_size);
7454 }
7455
7456 #if TARGET_ABI_OPEN_VMS
7457 /* Ifdef'ed because link_section is only available then.  */
7458 readonly_data_section ();
7459 fprintf (file, "\t.align 3\n");
7460 assemble_name (file, fnname); fputs ("..na:\n", file);
7461 fputs ("\t.ascii \"", file);
7462 assemble_name (file, fnname);
7463 fputs ("\\0\"\n", file);
7464 alpha_need_linkage (fnname, 1);
7465 text_section ();
7466 #endif
7467 }
7468
7469 /* Emit the .prologue note at the scheduled end of the prologue. */
7470
7471 static void
7472 alpha_output_function_end_prologue (FILE *file)
7473 {
7474 if (TARGET_ABI_UNICOSMK)
7475 ;
7476 else if (TARGET_ABI_OPEN_VMS)
7477 fputs ("\t.prologue\n", file);
7478 else if (TARGET_ABI_WINDOWS_NT)
7479 fputs ("\t.prologue 0\n", file);
7480 else if (!flag_inhibit_size_directive)
7481 fprintf (file, "\t.prologue %d\n",
7482 alpha_function_needs_gp || current_function_is_thunk);
7483 }
7484
7485 /* Write function epilogue. */
7486
7487 /* ??? At some point we will want to support full unwind, and so will
7488 need to mark the epilogue as well. At the moment, we just confuse
7489 dwarf2out. */
7490 #undef FRP
7491 #define FRP(exp) exp
7492
7493 void
7494 alpha_expand_epilogue (void)
7495 {
7496 /* Registers to save. */
7497 unsigned long imask = 0;
7498 unsigned long fmask = 0;
7499 /* Stack space needed for pushing registers clobbered by us. */
7500 HOST_WIDE_INT sa_size;
7501 /* Complete stack size needed. */
7502 HOST_WIDE_INT frame_size;
7503 /* Offset from base reg to register save area. */
7504 HOST_WIDE_INT reg_offset;
7505 int fp_is_frame_pointer, fp_offset;
7506 rtx sa_reg, sa_reg_exp = NULL;
7507 rtx sp_adj1, sp_adj2, mem;
7508 rtx eh_ofs;
7509 int i;
7510
7511 sa_size = alpha_sa_size ();
7512
7513 frame_size = get_frame_size ();
7514 if (TARGET_ABI_OPEN_VMS)
7515 frame_size = ALPHA_ROUND (sa_size
7516 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7517 + frame_size
7518 + current_function_pretend_args_size);
7519 else if (TARGET_ABI_UNICOSMK)
7520 frame_size = ALPHA_ROUND (sa_size
7521 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7522 + ALPHA_ROUND (frame_size
7523 + current_function_outgoing_args_size);
7524 else
7525 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7526 + sa_size
7527 + ALPHA_ROUND (frame_size
7528 + current_function_pretend_args_size));
7529
7530 if (TARGET_ABI_OPEN_VMS)
7531 {
7532 if (alpha_procedure_type == PT_STACK)
7533 reg_offset = 8;
7534 else
7535 reg_offset = 0;
7536 }
7537 else
7538 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7539
7540 alpha_sa_mask (&imask, &fmask);
7541
7542 fp_is_frame_pointer
7543 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7544 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
7545 fp_offset = 0;
7546 sa_reg = stack_pointer_rtx;
7547
7548 if (current_function_calls_eh_return)
7549 eh_ofs = EH_RETURN_STACKADJ_RTX;
7550 else
7551 eh_ofs = NULL_RTX;
7552
7553 if (!TARGET_ABI_UNICOSMK && sa_size)
7554 {
7555 /* If we have a frame pointer, restore SP from it. */
7556 if ((TARGET_ABI_OPEN_VMS
7557 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7558 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
7559 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
7560
7561 /* Cope with very large offsets to the register save area. */
7562 if (reg_offset + sa_size > 0x8000)
7563 {
7564 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7565 HOST_WIDE_INT bias;
7566
7567 if (low + sa_size <= 0x8000)
7568 bias = reg_offset - low, reg_offset = low;
7569 else
7570 bias = reg_offset, reg_offset = 0;
7571
7572 sa_reg = gen_rtx_REG (DImode, 22);
7573 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
7574
7575 FRP (emit_move_insn (sa_reg, sa_reg_exp));
7576 }
7577
7578 /* Restore registers in order, excepting a true frame pointer. */
7579
7580 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
7581 if (! eh_ofs)
7582 set_mem_alias_set (mem, alpha_sr_alias_set);
7583 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7584
7585 reg_offset += 8;
7586 imask &= ~(1UL << REG_RA);
7587
7588 for (i = 0; i < 31; ++i)
7589 if (imask & (1UL << i))
7590 {
7591 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
7592 fp_offset = reg_offset;
7593 else
7594 {
7595 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
7596 set_mem_alias_set (mem, alpha_sr_alias_set);
7597 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7598 }
7599 reg_offset += 8;
7600 }
7601
7602 for (i = 0; i < 31; ++i)
7603 if (fmask & (1UL << i))
7604 {
7605 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
7606 set_mem_alias_set (mem, alpha_sr_alias_set);
7607 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7608 reg_offset += 8;
7609 }
7610 }
7611 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7612 {
7613 /* Restore callee-saved general-purpose registers. */
7614
7615 reg_offset = -56;
7616
7617 for (i = 9; i < 15; i++)
7618 if (imask & (1UL << i))
7619 {
7620 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
7621 reg_offset));
7622 set_mem_alias_set (mem, alpha_sr_alias_set);
7623 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
7624 reg_offset -= 8;
7625 }
7626
7627 for (i = 2; i < 10; i++)
7628 if (fmask & (1UL << i))
7629 {
7630 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
7631 reg_offset));
7632 set_mem_alias_set (mem, alpha_sr_alias_set);
7633 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
7634 reg_offset -= 8;
7635 }
7636
7637 /* Restore the return address from the DSIB. */
7638
7639 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
7640 set_mem_alias_set (mem, alpha_sr_alias_set);
7641 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
7642 }
7643
7644 if (frame_size || eh_ofs)
7645 {
7646 sp_adj1 = stack_pointer_rtx;
7647
7648 if (eh_ofs)
7649 {
7650 sp_adj1 = gen_rtx_REG (DImode, 23);
7651 emit_move_insn (sp_adj1,
7652 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
7653 }
7654
7655 /* If the stack size is large, begin computation into a temporary
7656 register so as not to interfere with a potential fp restore,
7657 which must be consecutive with an SP restore. */
7658 if (frame_size < 32768
7659 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
7660 sp_adj2 = GEN_INT (frame_size);
7661 else if (TARGET_ABI_UNICOSMK)
7662 {
7663 sp_adj1 = gen_rtx_REG (DImode, 23);
7664 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
7665 sp_adj2 = const0_rtx;
7666 }
7667 else if (frame_size < 0x40007fffL)
7668 {
7669 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7670
7671 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
7672 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
7673 sp_adj1 = sa_reg;
7674 else
7675 {
7676 sp_adj1 = gen_rtx_REG (DImode, 23);
7677 FRP (emit_move_insn (sp_adj1, sp_adj2));
7678 }
7679 sp_adj2 = GEN_INT (low);
7680 }
7681 else
7682 {
7683 rtx tmp = gen_rtx_REG (DImode, 23);
7684 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
7685 3, false));
7686 if (!sp_adj2)
7687 {
7688 /* We can't drop new things to memory this late, afaik,
7689 so build it up by pieces. */
7690 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
7691 -(frame_size < 0)));
7692 if (!sp_adj2)
7693 abort ();
7694 }
7695 }
7696
7697 /* From now on, things must be in order. So emit blockages. */
7698
7699 /* Restore the frame pointer. */
7700 if (TARGET_ABI_UNICOSMK)
7701 {
7702 emit_insn (gen_blockage ());
7703 mem = gen_rtx_MEM (DImode,
7704 plus_constant (hard_frame_pointer_rtx, -16));
7705 set_mem_alias_set (mem, alpha_sr_alias_set);
7706 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7707 }
7708 else if (fp_is_frame_pointer)
7709 {
7710 emit_insn (gen_blockage ());
7711 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
7712 set_mem_alias_set (mem, alpha_sr_alias_set);
7713 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
7714 }
7715 else if (TARGET_ABI_OPEN_VMS)
7716 {
7717 emit_insn (gen_blockage ());
7718 FRP (emit_move_insn (hard_frame_pointer_rtx,
7719 gen_rtx_REG (DImode, vms_save_fp_regno)));
7720 }
7721
7722 /* Restore the stack pointer. */
7723 emit_insn (gen_blockage ());
7724 if (sp_adj2 == const0_rtx)
7725 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
7726 else
7727 FRP (emit_move_insn (stack_pointer_rtx,
7728 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
7729 }
7730 else
7731 {
7732 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
7733 {
7734 emit_insn (gen_blockage ());
7735 FRP (emit_move_insn (hard_frame_pointer_rtx,
7736 gen_rtx_REG (DImode, vms_save_fp_regno)));
7737 }
7738 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
7739 {
7740 /* Decrement the frame pointer if the function does not have a
7741 frame. */
7742
7743 emit_insn (gen_blockage ());
7744 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
7745 hard_frame_pointer_rtx, constm1_rtx)));
7746 }
7747 }
7748 }
7749 \f
7750 /* Output the rest of the textual info surrounding the epilogue. */
7751
7752 void
7753 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
7754 {
7755 #if TARGET_ABI_OPEN_VMS
7756 alpha_write_linkage (file, fnname, decl);
7757 #endif
7758
7759 /* End the function. */
7760 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
7761 {
7762 fputs ("\t.end ", file);
7763 assemble_name (file, fnname);
7764 putc ('\n', file);
7765 }
7766 inside_function = FALSE;
7767
7768 /* Output jump tables and the static subroutine information block. */
7769 if (TARGET_ABI_UNICOSMK)
7770 {
7771 unicosmk_output_ssib (file, fnname);
7772 unicosmk_output_deferred_case_vectors (file);
7773 }
7774 }
7775
7776 #if TARGET_ABI_OSF
7777 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
7778
7779 In order to avoid the hordes of differences between generated code
7780 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
7781 lots of code loading up large constants, generate rtl and emit it
7782 instead of going straight to text.
7783
7784 Not sure why this idea hasn't been explored before... */
7785
7786 static void
7787 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7788 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7789 tree function)
7790 {
7791 HOST_WIDE_INT hi, lo;
7792 rtx this, insn, funexp;
7793
7794 reset_block_changes ();
7795
7796 /* We always require a valid GP. */
7797 emit_insn (gen_prologue_ldgp ());
7798 emit_note (NOTE_INSN_PROLOGUE_END);
7799
7800 /* Find the "this" pointer. If the function returns a structure,
7801 the structure return pointer is in $16. */
7802 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7803 this = gen_rtx_REG (Pmode, 17);
7804 else
7805 this = gen_rtx_REG (Pmode, 16);
7806
7807 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
7808 entire constant for the add. */
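/* A worked example (numbers invented, not from the original source):
   for delta = 0x12345678 the split below gives lo = 0x5678 and
   hi = 0x12340000, emitted as ldah+lda.  If the low 16 bits were
   0x8004, lo would sign-extend to -0x7ffc and hi would absorb the
   difference, which is what the ldah/lda pair expects.  A minimal
   standalone sketch of the same split (hypothetical helper, not part
   of this file):

     static void
     split_hi_lo (HOST_WIDE_INT delta, HOST_WIDE_INT *hi, HOST_WIDE_INT *lo)
     {
       *lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
       *hi = (((delta - *lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
     }  */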
7809 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
7810 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7811 if (hi + lo == delta)
7812 {
7813 if (hi)
7814 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
7815 if (lo)
7816 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
7817 }
7818 else
7819 {
7820 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
7821 delta, -(delta < 0));
7822 emit_insn (gen_adddi3 (this, this, tmp));
7823 }
7824
7825 /* Add a delta stored in the vtable at VCALL_OFFSET. */
7826 if (vcall_offset)
7827 {
7828 rtx tmp, tmp2;
7829
7830 tmp = gen_rtx_REG (Pmode, 0);
7831 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
7832
7833 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
7834 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7835 if (hi + lo == vcall_offset)
7836 {
7837 if (hi)
7838 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
7839 }
7840 else
7841 {
7842 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
7843 vcall_offset, -(vcall_offset < 0));
7844 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
7845 lo = 0;
7846 }
7847 if (lo)
7848 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
7849 else
7850 tmp2 = tmp;
7851 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
7852
7853 emit_insn (gen_adddi3 (this, this, tmp));
7854 }
7855
7856 /* Generate a tail call to the target function. */
7857 if (! TREE_USED (function))
7858 {
7859 assemble_external (function);
7860 TREE_USED (function) = 1;
7861 }
7862 funexp = XEXP (DECL_RTL (function), 0);
7863 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
7864 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
7865 SIBLING_CALL_P (insn) = 1;
7866
7867 /* Run just enough of rest_of_compilation to get the insns emitted.
7868 There's not really enough bulk here to make other passes such as
7869 instruction scheduling worthwhile. Note that use_thunk calls
7870 assemble_start_function and assemble_end_function. */
7871 insn = get_insns ();
7872 insn_locators_initialize ();
7873 shorten_branches (insn);
7874 final_start_function (insn, file, 1);
7875 final (insn, file, 1, 0);
7876 final_end_function ();
7877 }
7878 #endif /* TARGET_ABI_OSF */
7879 \f
7880 /* Debugging support. */
7881
7882 #include "gstab.h"
7883
7884 /* Count the number of sdb-related labels generated (to find block
7885 start and end boundaries). */
7886
7887 int sdb_label_count = 0;
7888
7889 /* Name of the file containing the current function. */
7890
7891 static const char *current_function_file = "";
7892
7893 /* Offsets to alpha virtual arg/local debugging pointers. */
7894
7895 long alpha_arg_offset;
7896 long alpha_auto_offset;
7897 \f
7898 /* Emit a new filename to a stream. */
7899
7900 void
7901 alpha_output_filename (FILE *stream, const char *name)
7902 {
7903 static int first_time = TRUE;
7904
7905 if (first_time)
7906 {
7907 first_time = FALSE;
7908 ++num_source_filenames;
7909 current_function_file = name;
7910 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7911 output_quoted_string (stream, name);
7912 fprintf (stream, "\n");
7913 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
7914 fprintf (stream, "\t#@stabs\n");
7915 }
7916
7917 else if (write_symbols == DBX_DEBUG)
7918 /* dbxout.c will emit an appropriate .stabs directive. */
7919 return;
7920
7921 else if (name != current_function_file
7922 && strcmp (name, current_function_file) != 0)
7923 {
7924 if (inside_function && ! TARGET_GAS)
7925 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
7926 else
7927 {
7928 ++num_source_filenames;
7929 current_function_file = name;
7930 fprintf (stream, "\t.file\t%d ", num_source_filenames);
7931 }
7932
7933 output_quoted_string (stream, name);
7934 fprintf (stream, "\n");
7935 }
7936 }
7937 \f
7938 /* Structure to show the current status of registers and memory. */
7939
7940 struct shadow_summary
7941 {
7942 struct {
7943 unsigned int i : 31; /* Mask of int regs */
7944 unsigned int fp : 31; /* Mask of fp regs */
7945 unsigned int mem : 1; /* mem == imem | fpmem */
7946 } used, defd;
7947 };
7948
7949 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
7950 to the summary structure. SET is nonzero if the insn is setting the
7951 object, otherwise zero. */
7952
7953 static void
7954 summarize_insn (rtx x, struct shadow_summary *sum, int set)
7955 {
7956 const char *format_ptr;
7957 int i, j;
7958
7959 if (x == 0)
7960 return;
7961
7962 switch (GET_CODE (x))
7963 {
7964 /* ??? Note that this case would be incorrect if the Alpha had a
7965 ZERO_EXTRACT in SET_DEST. */
7966 case SET:
7967 summarize_insn (SET_SRC (x), sum, 0);
7968 summarize_insn (SET_DEST (x), sum, 1);
7969 break;
7970
7971 case CLOBBER:
7972 summarize_insn (XEXP (x, 0), sum, 1);
7973 break;
7974
7975 case USE:
7976 summarize_insn (XEXP (x, 0), sum, 0);
7977 break;
7978
7979 case ASM_OPERANDS:
7980 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
7981 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
7982 break;
7983
7984 case PARALLEL:
7985 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
7986 summarize_insn (XVECEXP (x, 0, i), sum, 0);
7987 break;
7988
7989 case SUBREG:
7990 summarize_insn (SUBREG_REG (x), sum, 0);
7991 break;
7992
7993 case REG:
7994 {
7995 int regno = REGNO (x);
7996 unsigned long mask = ((unsigned long) 1) << (regno % 32);
7997
7998 if (regno == 31 || regno == 63)
7999 break;
8000
8001 if (set)
8002 {
8003 if (regno < 32)
8004 sum->defd.i |= mask;
8005 else
8006 sum->defd.fp |= mask;
8007 }
8008 else
8009 {
8010 if (regno < 32)
8011 sum->used.i |= mask;
8012 else
8013 sum->used.fp |= mask;
8014 }
8015 }
8016 break;
8017
8018 case MEM:
8019 if (set)
8020 sum->defd.mem = 1;
8021 else
8022 sum->used.mem = 1;
8023
8024 /* Find the regs used in memory address computation: */
8025 summarize_insn (XEXP (x, 0), sum, 0);
8026 break;
8027
8028 case CONST_INT: case CONST_DOUBLE:
8029 case SYMBOL_REF: case LABEL_REF: case CONST:
8030 case SCRATCH: case ASM_INPUT:
8031 break;
8032
8033 /* Handle common unary and binary ops for efficiency. */
8034 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8035 case MOD: case UDIV: case UMOD: case AND: case IOR:
8036 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8037 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8038 case NE: case EQ: case GE: case GT: case LE:
8039 case LT: case GEU: case GTU: case LEU: case LTU:
8040 summarize_insn (XEXP (x, 0), sum, 0);
8041 summarize_insn (XEXP (x, 1), sum, 0);
8042 break;
8043
8044 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8045 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8046 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8047 case SQRT: case FFS:
8048 summarize_insn (XEXP (x, 0), sum, 0);
8049 break;
8050
8051 default:
8052 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8053 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8054 switch (format_ptr[i])
8055 {
8056 case 'e':
8057 summarize_insn (XEXP (x, i), sum, 0);
8058 break;
8059
8060 case 'E':
8061 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8062 summarize_insn (XVECEXP (x, i, j), sum, 0);
8063 break;
8064
8065 case 'i':
8066 break;
8067
8068 default:
8069 abort ();
8070 }
8071 }
8072 }
8073
8074 /* Ensure a sufficient number of `trapb' insns are in the code when
8075 the user requests code with a trap precision of functions or
8076 instructions.
8077
8078 In naive mode, when the user requests a trap-precision of
8079 "instruction", a trapb is needed after every instruction that may
8080 generate a trap. This ensures that the code is resumption safe but
8081 it is also slow.
8082
8083 When optimizations are turned on, we delay issuing a trapb as long
8084 as possible. In this context, a trap shadow is the sequence of
8085 instructions that starts with a (potentially) trap generating
8086 instruction and extends to the next trapb or call_pal instruction
8087 (but GCC never generates call_pal by itself). We can delay (and
8088 therefore sometimes omit) a trapb subject to the following
8089 conditions:
8090
8091 (a) On entry to the trap shadow, if any Alpha register or memory
8092 location contains a value that is used as an operand value by some
8093 instruction in the trap shadow (live on entry), then no instruction
8094 in the trap shadow may modify the register or memory location.
8095
8096 (b) Within the trap shadow, the computation of the base register
8097 for a memory load or store instruction may not involve using the
8098 result of an instruction that might generate an UNPREDICTABLE
8099 result.
8100
8101 (c) Within the trap shadow, no register may be used more than once
8102 as a destination register. (This is to make life easier for the
8103 trap-handler.)
8104
8105 (d) The trap shadow may not include any branch instructions. */
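/* A hedged illustration (made-up sequence, not from the original
   sources): under -mtrap-precision=i with optimization, a legal
   shadow might look like

	addt $f1,$f2,$f3	# may trap; the shadow opens here
	mult $f10,$f11,$f12	# may trap; distinct destination, ok by (c)
	trapb			# closes the shadow

   whereas writing $f3 a second time, branching, or using $f3 in an
   address computation inside the shadow would force the trapb to be
   emitted earlier.  */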
8106
8107 static void
8108 alpha_handle_trap_shadows (void)
8109 {
8110 struct shadow_summary shadow;
8111 int trap_pending, exception_nesting;
8112 rtx i, n;
8113
8114 trap_pending = 0;
8115 exception_nesting = 0;
8116 shadow.used.i = 0;
8117 shadow.used.fp = 0;
8118 shadow.used.mem = 0;
8119 shadow.defd = shadow.used;
8120
8121 for (i = get_insns (); i ; i = NEXT_INSN (i))
8122 {
8123 if (GET_CODE (i) == NOTE)
8124 {
8125 switch (NOTE_LINE_NUMBER (i))
8126 {
8127 case NOTE_INSN_EH_REGION_BEG:
8128 exception_nesting++;
8129 if (trap_pending)
8130 goto close_shadow;
8131 break;
8132
8133 case NOTE_INSN_EH_REGION_END:
8134 exception_nesting--;
8135 if (trap_pending)
8136 goto close_shadow;
8137 break;
8138
8139 case NOTE_INSN_EPILOGUE_BEG:
8140 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8141 goto close_shadow;
8142 break;
8143 }
8144 }
8145 else if (trap_pending)
8146 {
8147 if (alpha_tp == ALPHA_TP_FUNC)
8148 {
8149 if (GET_CODE (i) == JUMP_INSN
8150 && GET_CODE (PATTERN (i)) == RETURN)
8151 goto close_shadow;
8152 }
8153 else if (alpha_tp == ALPHA_TP_INSN)
8154 {
8155 if (optimize > 0)
8156 {
8157 struct shadow_summary sum;
8158
8159 sum.used.i = 0;
8160 sum.used.fp = 0;
8161 sum.used.mem = 0;
8162 sum.defd = sum.used;
8163
8164 switch (GET_CODE (i))
8165 {
8166 case INSN:
8167 /* Annoyingly, get_attr_trap will abort on these. */
8168 if (GET_CODE (PATTERN (i)) == USE
8169 || GET_CODE (PATTERN (i)) == CLOBBER)
8170 break;
8171
8172 summarize_insn (PATTERN (i), &sum, 0);
8173
8174 if ((sum.defd.i & shadow.defd.i)
8175 || (sum.defd.fp & shadow.defd.fp))
8176 {
8177 /* (c) would be violated */
8178 goto close_shadow;
8179 }
8180
8181 /* Combine shadow with summary of current insn: */
8182 shadow.used.i |= sum.used.i;
8183 shadow.used.fp |= sum.used.fp;
8184 shadow.used.mem |= sum.used.mem;
8185 shadow.defd.i |= sum.defd.i;
8186 shadow.defd.fp |= sum.defd.fp;
8187 shadow.defd.mem |= sum.defd.mem;
8188
8189 if ((sum.defd.i & shadow.used.i)
8190 || (sum.defd.fp & shadow.used.fp)
8191 || (sum.defd.mem & shadow.used.mem))
8192 {
8193 /* (a) would be violated (also takes care of (b)) */
8194 if (get_attr_trap (i) == TRAP_YES
8195 && ((sum.defd.i & sum.used.i)
8196 || (sum.defd.fp & sum.used.fp)))
8197 abort ();
8198
8199 goto close_shadow;
8200 }
8201 break;
8202
8203 case JUMP_INSN:
8204 case CALL_INSN:
8205 case CODE_LABEL:
8206 goto close_shadow;
8207
8208 default:
8209 abort ();
8210 }
8211 }
8212 else
8213 {
8214 close_shadow:
8215 n = emit_insn_before (gen_trapb (), i);
8216 PUT_MODE (n, TImode);
8217 PUT_MODE (i, TImode);
8218 trap_pending = 0;
8219 shadow.used.i = 0;
8220 shadow.used.fp = 0;
8221 shadow.used.mem = 0;
8222 shadow.defd = shadow.used;
8223 }
8224 }
8225 }
8226
8227 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8228 && GET_CODE (i) == INSN
8229 && GET_CODE (PATTERN (i)) != USE
8230 && GET_CODE (PATTERN (i)) != CLOBBER
8231 && get_attr_trap (i) == TRAP_YES)
8232 {
8233 if (optimize && !trap_pending)
8234 summarize_insn (PATTERN (i), &shadow, 0);
8235 trap_pending = 1;
8236 }
8237 }
8238 }
8239 \f
8240 /* Alpha can only issue instruction groups simultaneously if they are
8241 suitably aligned. This is very processor-specific. */
8242
8243 enum alphaev4_pipe {
8244 EV4_STOP = 0,
8245 EV4_IB0 = 1,
8246 EV4_IB1 = 2,
8247 EV4_IBX = 4
8248 };
8249
8250 enum alphaev5_pipe {
8251 EV5_STOP = 0,
8252 EV5_NONE = 1,
8253 EV5_E01 = 2,
8254 EV5_E0 = 4,
8255 EV5_E1 = 8,
8256 EV5_FAM = 16,
8257 EV5_FA = 32,
8258 EV5_FM = 64
8259 };
8260
8261 static enum alphaev4_pipe
8262 alphaev4_insn_pipe (rtx insn)
8263 {
8264 if (recog_memoized (insn) < 0)
8265 return EV4_STOP;
8266 if (get_attr_length (insn) != 4)
8267 return EV4_STOP;
8268
8269 switch (get_attr_type (insn))
8270 {
8271 case TYPE_ILD:
8272 case TYPE_FLD:
8273 return EV4_IBX;
8274
8275 case TYPE_LDSYM:
8276 case TYPE_IADD:
8277 case TYPE_ILOG:
8278 case TYPE_ICMOV:
8279 case TYPE_ICMP:
8280 case TYPE_IST:
8281 case TYPE_FST:
8282 case TYPE_SHIFT:
8283 case TYPE_IMUL:
8284 case TYPE_FBR:
8285 return EV4_IB0;
8286
8287 case TYPE_MISC:
8288 case TYPE_IBR:
8289 case TYPE_JSR:
8290 case TYPE_CALLPAL:
8291 case TYPE_FCPYS:
8292 case TYPE_FCMOV:
8293 case TYPE_FADD:
8294 case TYPE_FDIV:
8295 case TYPE_FMUL:
8296 return EV4_IB1;
8297
8298 default:
8299 abort ();
8300 }
8301 }
8302
8303 static enum alphaev5_pipe
8304 alphaev5_insn_pipe (rtx insn)
8305 {
8306 if (recog_memoized (insn) < 0)
8307 return EV5_STOP;
8308 if (get_attr_length (insn) != 4)
8309 return EV5_STOP;
8310
8311 switch (get_attr_type (insn))
8312 {
8313 case TYPE_ILD:
8314 case TYPE_FLD:
8315 case TYPE_LDSYM:
8316 case TYPE_IADD:
8317 case TYPE_ILOG:
8318 case TYPE_ICMOV:
8319 case TYPE_ICMP:
8320 return EV5_E01;
8321
8322 case TYPE_IST:
8323 case TYPE_FST:
8324 case TYPE_SHIFT:
8325 case TYPE_IMUL:
8326 case TYPE_MISC:
8327 case TYPE_MVI:
8328 return EV5_E0;
8329
8330 case TYPE_IBR:
8331 case TYPE_JSR:
8332 case TYPE_CALLPAL:
8333 return EV5_E1;
8334
8335 case TYPE_FCPYS:
8336 return EV5_FAM;
8337
8338 case TYPE_FBR:
8339 case TYPE_FCMOV:
8340 case TYPE_FADD:
8341 case TYPE_FDIV:
8342 return EV5_FA;
8343
8344 case TYPE_FMUL:
8345 return EV5_FM;
8346
8347 default:
8348 abort();
8349 }
8350 }
8351
8352 /* IN_USE is a mask of the slots currently filled within the insn group.
8353 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8354 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8355
8356 LEN is, of course, the length of the group in bytes. */
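/* For example (illustrative): if a load, which is EV4_IBX, has already
   claimed IB0 and an IB0-only insn arrives, the load is treated as
   issuing from IB1 instead, so both still fit in one 8-byte group; a
   further IB0-only insn then ends the group.  */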
8357
8358 static rtx
8359 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8360 {
8361 int len, in_use;
8362
8363 len = in_use = 0;
8364
8365 if (! INSN_P (insn)
8366 || GET_CODE (PATTERN (insn)) == CLOBBER
8367 || GET_CODE (PATTERN (insn)) == USE)
8368 goto next_and_done;
8369
8370 while (1)
8371 {
8372 enum alphaev4_pipe pipe;
8373
8374 pipe = alphaev4_insn_pipe (insn);
8375 switch (pipe)
8376 {
8377 case EV4_STOP:
8378 /* Force complex instructions to start new groups. */
8379 if (in_use)
8380 goto done;
8381
8382 /* If this is a completely unrecognized insn, it's an asm.
8383 We don't know how long it is, so record length as -1 to
8384 signal a needed realignment. */
8385 if (recog_memoized (insn) < 0)
8386 len = -1;
8387 else
8388 len = get_attr_length (insn);
8389 goto next_and_done;
8390
8391 case EV4_IBX:
8392 if (in_use & EV4_IB0)
8393 {
8394 if (in_use & EV4_IB1)
8395 goto done;
8396 in_use |= EV4_IB1;
8397 }
8398 else
8399 in_use |= EV4_IB0 | EV4_IBX;
8400 break;
8401
8402 case EV4_IB0:
8403 if (in_use & EV4_IB0)
8404 {
8405 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8406 goto done;
8407 in_use |= EV4_IB1;
8408 }
8409 in_use |= EV4_IB0;
8410 break;
8411
8412 case EV4_IB1:
8413 if (in_use & EV4_IB1)
8414 goto done;
8415 in_use |= EV4_IB1;
8416 break;
8417
8418 default:
8419 abort();
8420 }
8421 len += 4;
8422
8423 /* Haifa doesn't do well scheduling branches. */
8424 if (GET_CODE (insn) == JUMP_INSN)
8425 goto next_and_done;
8426
8427 next:
8428 insn = next_nonnote_insn (insn);
8429
8430 if (!insn || ! INSN_P (insn))
8431 goto done;
8432
8433 /* Let Haifa tell us where it thinks insn group boundaries are. */
8434 if (GET_MODE (insn) == TImode)
8435 goto done;
8436
8437 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8438 goto next;
8439 }
8440
8441 next_and_done:
8442 insn = next_nonnote_insn (insn);
8443
8444 done:
8445 *plen = len;
8446 *pin_use = in_use;
8447 return insn;
8448 }
8449
8450 /* IN_USE is a mask of the slots currently filled within the insn group.
8451 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8452 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8453
8454 LEN is, of course, the length of the group in bytes. */
8455
8456 static rtx
8457 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8458 {
8459 int len, in_use;
8460
8461 len = in_use = 0;
8462
8463 if (! INSN_P (insn)
8464 || GET_CODE (PATTERN (insn)) == CLOBBER
8465 || GET_CODE (PATTERN (insn)) == USE)
8466 goto next_and_done;
8467
8468 while (1)
8469 {
8470 enum alphaev5_pipe pipe;
8471
8472 pipe = alphaev5_insn_pipe (insn);
8473 switch (pipe)
8474 {
8475 case EV5_STOP:
8476 /* Force complex instructions to start new groups. */
8477 if (in_use)
8478 goto done;
8479
8480 /* If this is a completely unrecognized insn, it's an asm.
8481 We don't know how long it is, so record length as -1 to
8482 signal a needed realignment. */
8483 if (recog_memoized (insn) < 0)
8484 len = -1;
8485 else
8486 len = get_attr_length (insn);
8487 goto next_and_done;
8488
8489 /* ??? Most of the places below, we would like to abort, as
8490 it would indicate an error either in Haifa, or in the
8491 scheduling description. Unfortunately, Haifa never
8492 schedules the last instruction of the BB, so we don't
8493 have an accurate TI bit to go off. */
8494 case EV5_E01:
8495 if (in_use & EV5_E0)
8496 {
8497 if (in_use & EV5_E1)
8498 goto done;
8499 in_use |= EV5_E1;
8500 }
8501 else
8502 in_use |= EV5_E0 | EV5_E01;
8503 break;
8504
8505 case EV5_E0:
8506 if (in_use & EV5_E0)
8507 {
8508 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8509 goto done;
8510 in_use |= EV5_E1;
8511 }
8512 in_use |= EV5_E0;
8513 break;
8514
8515 case EV5_E1:
8516 if (in_use & EV5_E1)
8517 goto done;
8518 in_use |= EV5_E1;
8519 break;
8520
8521 case EV5_FAM:
8522 if (in_use & EV5_FA)
8523 {
8524 if (in_use & EV5_FM)
8525 goto done;
8526 in_use |= EV5_FM;
8527 }
8528 else
8529 in_use |= EV5_FA | EV5_FAM;
8530 break;
8531
8532 case EV5_FA:
8533 if (in_use & EV5_FA)
8534 goto done;
8535 in_use |= EV5_FA;
8536 break;
8537
8538 case EV5_FM:
8539 if (in_use & EV5_FM)
8540 goto done;
8541 in_use |= EV5_FM;
8542 break;
8543
8544 case EV5_NONE:
8545 break;
8546
8547 default:
8548 abort();
8549 }
8550 len += 4;
8551
8552 /* Haifa doesn't do well scheduling branches. */
8553 /* ??? If this is predicted not-taken, slotting continues, except
8554 that no more IBR, FBR, or JSR insns may be slotted. */
8555 if (GET_CODE (insn) == JUMP_INSN)
8556 goto next_and_done;
8557
8558 next:
8559 insn = next_nonnote_insn (insn);
8560
8561 if (!insn || ! INSN_P (insn))
8562 goto done;
8563
8564 /* Let Haifa tell us where it thinks insn group boundaries are. */
8565 if (GET_MODE (insn) == TImode)
8566 goto done;
8567
8568 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8569 goto next;
8570 }
8571
8572 next_and_done:
8573 insn = next_nonnote_insn (insn);
8574
8575 done:
8576 *plen = len;
8577 *pin_use = in_use;
8578 return insn;
8579 }
8580
8581 static rtx
8582 alphaev4_next_nop (int *pin_use)
8583 {
8584 int in_use = *pin_use;
8585 rtx nop;
8586
8587 if (!(in_use & EV4_IB0))
8588 {
8589 in_use |= EV4_IB0;
8590 nop = gen_nop ();
8591 }
8592 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
8593 {
8594 in_use |= EV4_IB1;
8595 nop = gen_nop ();
8596 }
8597 else if (TARGET_FP && !(in_use & EV4_IB1))
8598 {
8599 in_use |= EV4_IB1;
8600 nop = gen_fnop ();
8601 }
8602 else
8603 nop = gen_unop ();
8604
8605 *pin_use = in_use;
8606 return nop;
8607 }
8608
8609 static rtx
8610 alphaev5_next_nop (int *pin_use)
8611 {
8612 int in_use = *pin_use;
8613 rtx nop;
8614
8615 if (!(in_use & EV5_E1))
8616 {
8617 in_use |= EV5_E1;
8618 nop = gen_nop ();
8619 }
8620 else if (TARGET_FP && !(in_use & EV5_FA))
8621 {
8622 in_use |= EV5_FA;
8623 nop = gen_fnop ();
8624 }
8625 else if (TARGET_FP && !(in_use & EV5_FM))
8626 {
8627 in_use |= EV5_FM;
8628 nop = gen_fnop ();
8629 }
8630 else
8631 nop = gen_unop ();
8632
8633 *pin_use = in_use;
8634 return nop;
8635 }
8636
8637 /* The instruction group alignment main loop. */
8638
8639 static void
8640 alpha_align_insns (unsigned int max_align,
8641 rtx (*next_group) (rtx, int *, int *),
8642 rtx (*next_nop) (int *))
8643 {
8644 /* ALIGN is the known alignment for the insn group. */
8645 unsigned int align;
8646 /* OFS is the offset of the current insn in the insn group. */
8647 int ofs;
8648 int prev_in_use, in_use, len;
8649 rtx i, next;
8650
8651 /* Let shorten branches care for assigning alignments to code labels. */
8652 shorten_branches (get_insns ());
8653
8654 if (align_functions < 4)
8655 align = 4;
8656 else if ((unsigned int) align_functions < max_align)
8657 align = align_functions;
8658 else
8659 align = max_align;
8660
8661 ofs = prev_in_use = 0;
8662 i = get_insns ();
8663 if (GET_CODE (i) == NOTE)
8664 i = next_nonnote_insn (i);
8665
8666 while (i)
8667 {
8668 next = (*next_group) (i, &in_use, &len);
8669
8670 /* When we see a label, resync alignment etc. */
8671 if (GET_CODE (i) == CODE_LABEL)
8672 {
8673 unsigned int new_align = 1 << label_to_alignment (i);
8674
8675 if (new_align >= align)
8676 {
8677 align = new_align < max_align ? new_align : max_align;
8678 ofs = 0;
8679 }
8680
8681 else if (ofs & (new_align-1))
8682 ofs = (ofs | (new_align-1)) + 1;
8683 if (len != 0)
8684 abort();
8685 }
8686
8687 /* Handle complex instructions specially. */
8688 else if (in_use == 0)
8689 {
8690 /* Asms will have length < 0. This is a signal that we have
8691 lost alignment knowledge. Assume, however, that the asm
8692 will not mis-align instructions. */
8693 if (len < 0)
8694 {
8695 ofs = 0;
8696 align = 4;
8697 len = 0;
8698 }
8699 }
8700
8701 /* If the known alignment is smaller than the recognized insn group,
8702 realign the output. */
8703 else if ((int) align < len)
8704 {
8705 unsigned int new_log_align = len > 8 ? 4 : 3;
8706 rtx prev, where;
8707
8708 where = prev = prev_nonnote_insn (i);
8709 if (!where || GET_CODE (where) != CODE_LABEL)
8710 where = i;
8711
8712 /* Can't realign between a call and its gp reload. */
8713 if (! (TARGET_EXPLICIT_RELOCS
8714 && prev && GET_CODE (prev) == CALL_INSN))
8715 {
8716 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
8717 align = 1 << new_log_align;
8718 ofs = 0;
8719 }
8720 }
8721
8722 /* If the group won't fit in the same INT16 as the previous,
8723 we need to add padding to keep the group together. Rather
8724 than simply leaving the insn filling to the assembler, we
8725 can make use of the knowledge of what sorts of instructions
8726 were issued in the previous group to make sure that all of
8727 the added nops are really free. */
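      /* For instance (numbers illustrative): with align == 16 and
	 ofs == 12, an 8-byte two-insn group would straddle the
	 boundary, so (align - ofs) / 4 == 1 nop is emitted, and
	 next_nop picks one for an issue slot the previous group
	 left empty.  */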
8728 else if (ofs + len > (int) align)
8729 {
8730 int nop_count = (align - ofs) / 4;
8731 rtx where;
8732
8733 /* Insert nops before labels, branches, and calls to truly merge
8734 the execution of the nops with the previous instruction group. */
8735 where = prev_nonnote_insn (i);
8736 if (where)
8737 {
8738 if (GET_CODE (where) == CODE_LABEL)
8739 {
8740 rtx where2 = prev_nonnote_insn (where);
8741 if (where2 && GET_CODE (where2) == JUMP_INSN)
8742 where = where2;
8743 }
8744 else if (GET_CODE (where) == INSN)
8745 where = i;
8746 }
8747 else
8748 where = i;
8749
8750 do
8751 emit_insn_before ((*next_nop)(&prev_in_use), where);
8752 while (--nop_count);
8753 ofs = 0;
8754 }
8755
8756 ofs = (ofs + len) & (align - 1);
8757 prev_in_use = in_use;
8758 i = next;
8759 }
8760 }
8761 \f
8762 /* Machine dependent reorg pass. */
8763
8764 static void
8765 alpha_reorg (void)
8766 {
8767 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
8768 alpha_handle_trap_shadows ();
8769
8770 /* Due to the number of extra trapb insns, don't bother fixing up
8771 alignment when trap precision is instruction. Moreover, we can
8772 only do our job when sched2 is run. */
8773 if (optimize && !optimize_size
8774 && alpha_tp != ALPHA_TP_INSN
8775 && flag_schedule_insns_after_reload)
8776 {
8777 if (alpha_cpu == PROCESSOR_EV4)
8778 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
8779 else if (alpha_cpu == PROCESSOR_EV5)
8780 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
8781 }
8782 }
8783 \f
8784 #if !TARGET_ABI_UNICOSMK
8785
8786 #ifdef HAVE_STAMP_H
8787 #include <stamp.h>
8788 #endif
8789
8790 static void
8791 alpha_file_start (void)
8792 {
8793 #ifdef OBJECT_FORMAT_ELF
8794 /* If emitting dwarf2 debug information, we cannot generate a .file
8795 directive to start the file, as it will conflict with dwarf2out
8796 file numbers. So it's only useful when emitting mdebug output. */
8797 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
8798 #endif
8799
8800 default_file_start ();
8801 #ifdef MS_STAMP
8802 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
8803 #endif
8804
8805 fputs ("\t.set noreorder\n", asm_out_file);
8806 fputs ("\t.set volatile\n", asm_out_file);
8807 if (!TARGET_ABI_OPEN_VMS)
8808 fputs ("\t.set noat\n", asm_out_file);
8809 if (TARGET_EXPLICIT_RELOCS)
8810 fputs ("\t.set nomacro\n", asm_out_file);
8811 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
8812 fprintf (asm_out_file,
8813 "\t.arch %s\n",
8814 TARGET_CPU_EV6 ? "ev6"
8815 : (TARGET_CPU_EV5
8816 ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5")
8817 : "ev4"));
8818 }
8819 #endif
8820
8821 #ifdef OBJECT_FORMAT_ELF
8822
8823 /* Switch to the section to which we should output X. The only thing
8824 special we do here is to honor small data. */
8825
8826 static void
8827 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
8828 unsigned HOST_WIDE_INT align)
8829 {
8830 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
8831 /* ??? Consider using mergeable sdata sections. */
8832 sdata_section ();
8833 else
8834 default_elf_select_rtx_section (mode, x, align);
8835 }
8836
8837 #endif /* OBJECT_FORMAT_ELF */
8838 \f
8839 /* Structure to collect function names for final output in link section. */
8840 /* Note that items marked with GTY can't be ifdef'ed out. */
8841
8842 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
8843 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
8844
8845 struct alpha_links GTY(())
8846 {
8847 int num;
8848 rtx linkage;
8849 enum links_kind lkind;
8850 enum reloc_kind rkind;
8851 };
8852
8853 struct alpha_funcs GTY(())
8854 {
8855 int num;
8856 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8857 links;
8858 };
8859
8860 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
8861 splay_tree alpha_links_tree;
8862 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
8863 splay_tree alpha_funcs_tree;
8864
8865 static GTY(()) int alpha_funcs_num;
8866
8867 #if TARGET_ABI_OPEN_VMS
8868
8869 /* Return the VMS argument type corresponding to MODE. */
8870
8871 enum avms_arg_type
8872 alpha_arg_type (enum machine_mode mode)
8873 {
8874 switch (mode)
8875 {
8876 case SFmode:
8877 return TARGET_FLOAT_VAX ? FF : FS;
8878 case DFmode:
8879 return TARGET_FLOAT_VAX ? FD : FT;
8880 default:
8881 return I64;
8882 }
8883 }
8884
8885 /* Return an rtx for an integer representing the VMS Argument Information
8886 register value. */
8887
8888 rtx
8889 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
8890 {
8891 unsigned HOST_WIDE_INT regval = cum.num_args;
8892 int i;
8893
8894 for (i = 0; i < 6; i++)
8895 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
8896
8897 return GEN_INT (regval);
8898 }
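
/* A worked example of the packing above (argument values invented):
   with cum.num_args == 2, the low bits of the AI value hold 2, the
   type of argument 0 sits in bits 8-10 and that of argument 1 in
   bits 11-13; each of the six possible register arguments gets its
   own 3-bit field above the count.  */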
8899 \f
8900 /* Make (or fake) .linkage entry for function call.
8901
8902 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
8903
8904 Return a SYMBOL_REF rtx for the linkage. */
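/* For illustration (name invented): a reference to a routine "foo"
   produces a linkage symbol "$foo..lk" here, and alpha_use_linkage
   below derives per-function entries of the form "$<N>..foo..lk",
   where N numbers the referencing function.  */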
8905
8906 rtx
8907 alpha_need_linkage (const char *name, int is_local)
8908 {
8909 splay_tree_node node;
8910 struct alpha_links *al;
8911
8912 if (name[0] == '*')
8913 name++;
8914
8915 if (is_local)
8916 {
8917 struct alpha_funcs *cfaf;
8918
8919 if (!alpha_funcs_tree)
8920 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
8921 splay_tree_compare_pointers);
8922
8923 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
8924
8925 cfaf->links = 0;
8926 cfaf->num = ++alpha_funcs_num;
8927
8928 splay_tree_insert (alpha_funcs_tree,
8929 (splay_tree_key) current_function_decl,
8930 (splay_tree_value) cfaf);
8931 }
8932
8933 if (alpha_links_tree)
8934 {
8935 /* Is this name already defined? */
8936
8937 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
8938 if (node)
8939 {
8940 al = (struct alpha_links *) node->value;
8941 if (is_local)
8942 {
8943 /* Defined here but external assumed. */
8944 if (al->lkind == KIND_EXTERN)
8945 al->lkind = KIND_LOCAL;
8946 }
8947 else
8948 {
8949 /* Used here but unused assumed. */
8950 if (al->lkind == KIND_UNUSED)
8951 al->lkind = KIND_LOCAL;
8952 }
8953 return al->linkage;
8954 }
8955 }
8956 else
8957 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
8958
8959 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
8960 name = ggc_strdup (name);
8961
8962 /* Assume external if no definition. */
8963 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
8964
8965 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
8966 get_identifier (name);
8967
8968 /* Construct a SYMBOL_REF for us to call. */
8969 {
8970 size_t name_len = strlen (name);
8971 char *linksym = alloca (name_len + 6);
8972 linksym[0] = '$';
8973 memcpy (linksym + 1, name, name_len);
8974 memcpy (linksym + 1 + name_len, "..lk", 5);
8975 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
8976 ggc_alloc_string (linksym, name_len + 5));
8977 }
8978
8979 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
8980 (splay_tree_value) al);
8981
8982 return al->linkage;
8983 }
8984
8985 rtx
8986 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
8987 {
8988 splay_tree_node cfunnode;
8989 struct alpha_funcs *cfaf;
8990 struct alpha_links *al;
8991 const char *name = XSTR (linkage, 0);
8992
8993 cfaf = (struct alpha_funcs *) 0;
8994 al = (struct alpha_links *) 0;
8995
8996 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
8997 cfaf = (struct alpha_funcs *) cfunnode->value;
8998
8999 if (cfaf->links)
9000 {
9001 splay_tree_node lnode;
9002
9003 /* Is this name already defined? */
9004
9005 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9006 if (lnode)
9007 al = (struct alpha_links *) lnode->value;
9008 }
9009 else
9010 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9011
9012 if (!al)
9013 {
9014 size_t name_len;
9015 size_t buflen;
9016 char buf [512];
9017 char *linksym;
9018 splay_tree_node node = 0;
9019 struct alpha_links *anl;
9020
9021 if (name[0] == '*')
9022 name++;
9023
9024 name_len = strlen (name);
9025
9026 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9027 al->num = cfaf->num;
9028
9029 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9030 if (node)
9031 {
9032 anl = (struct alpha_links *) node->value;
9033 al->lkind = anl->lkind;
9034 }
9035
9036 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9037 buflen = strlen (buf);
9038 linksym = alloca (buflen + 1);
9039 memcpy (linksym, buf, buflen + 1);
9040
9041 al->linkage = gen_rtx_SYMBOL_REF
9042 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9043
9044 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9045 (splay_tree_value) al);
9046 }
9047
9048 if (rflag)
9049 al->rkind = KIND_CODEADDR;
9050 else
9051 al->rkind = KIND_LINKAGE;
9052
9053 if (lflag)
9054 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9055 else
9056 return al->linkage;
9057 }
9058
9059 static int
9060 alpha_write_one_linkage (splay_tree_node node, void *data)
9061 {
9062 const char *const name = (const char *) node->key;
9063 struct alpha_links *link = (struct alpha_links *) node->value;
9064 FILE *stream = (FILE *) data;
9065
9066 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9067 if (link->rkind == KIND_CODEADDR)
9068 {
9069 if (link->lkind == KIND_LOCAL)
9070 {
9071 /* Local and used */
9072 fprintf (stream, "\t.quad %s..en\n", name);
9073 }
9074 else
9075 {
9076 /* External and used, request code address. */
9077 fprintf (stream, "\t.code_address %s\n", name);
9078 }
9079 }
9080 else
9081 {
9082 if (link->lkind == KIND_LOCAL)
9083 {
9084 /* Local and used, build linkage pair. */
9085 fprintf (stream, "\t.quad %s..en\n", name);
9086 fprintf (stream, "\t.quad %s\n", name);
9087 }
9088 else
9089 {
9090 /* External and used, request linkage pair. */
9091 fprintf (stream, "\t.linkage %s\n", name);
9092 }
9093 }
9094
9095 return 0;
9096 }
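
/* Sample output (names and numbers invented): a locally defined "bar"
   referenced through a linkage pair comes out as

	$2..bar..lk:
		.quad bar..en
		.quad bar

   while an external "baz" is emitted as a single ".linkage baz"
   request, and KIND_CODEADDR entries use ".quad bar..en" or
   ".code_address baz" instead.  */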
9097
9098 static void
9099 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9100 {
9101 splay_tree_node node;
9102 struct alpha_funcs *func;
9103
9104 link_section ();
9105 fprintf (stream, "\t.align 3\n");
9106 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9107 func = (struct alpha_funcs *) node->value;
9108
9109 fputs ("\t.name ", stream);
9110 assemble_name (stream, funname);
9111 fputs ("..na\n", stream);
9112 ASM_OUTPUT_LABEL (stream, funname);
9113 fprintf (stream, "\t.pdesc ");
9114 assemble_name (stream, funname);
9115 fprintf (stream, "..en,%s\n",
9116 alpha_procedure_type == PT_STACK ? "stack"
9117 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9118
9119 if (func->links)
9120 {
9121 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9122 /* splay_tree_delete (func->links); */
9123 }
9124 }
9125
9126 /* Given a decl, a section name, and whether the decl initializer
9127 has relocs, choose attributes for the section. */
9128
9129 #define SECTION_VMS_OVERLAY SECTION_FORGET
9130 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9131 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9132
9133 static unsigned int
9134 vms_section_type_flags (tree decl, const char *name, int reloc)
9135 {
9136 unsigned int flags = default_section_type_flags (decl, name, reloc);
9137
9138 if (decl && DECL_ATTRIBUTES (decl)
9139 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9140 flags |= SECTION_VMS_OVERLAY;
9141 if (decl && DECL_ATTRIBUTES (decl)
9142 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9143 flags |= SECTION_VMS_GLOBAL;
9144 if (decl && DECL_ATTRIBUTES (decl)
9145 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9146 flags |= SECTION_VMS_INITIALIZE;
9147
9148 return flags;
9149 }
9150
9151 /* Switch to an arbitrary section NAME with attributes as specified
9152 by FLAGS. DECL is the declaration associated with the section, if
9153 any; it is currently unused here. */
9154
9155 static void
9156 vms_asm_named_section (const char *name, unsigned int flags,
9157 tree decl ATTRIBUTE_UNUSED)
9158 {
9159 fputc ('\n', asm_out_file);
9160 fprintf (asm_out_file, ".section\t%s", name);
9161
9162 if (flags & SECTION_VMS_OVERLAY)
9163 fprintf (asm_out_file, ",OVR");
9164 if (flags & SECTION_VMS_GLOBAL)
9165 fprintf (asm_out_file, ",GBL");
9166 if (flags & SECTION_VMS_INITIALIZE)
9167 fprintf (asm_out_file, ",NOMOD");
9168 if (flags & SECTION_DEBUG)
9169 fprintf (asm_out_file, ",NOWRT");
9170
9171 fputc ('\n', asm_out_file);
9172 }
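
/* For example (section name invented): a declaration carrying the
   "overlaid" and "global" attributes placed in section FOO$DATA is
   announced as

	.section FOO$DATA,OVR,GBL

   with ,NOMOD and ,NOWRT appended for "initialize" attributes and
   debug sections respectively.  */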
9173
9174 /* Record an element in the table of global constructors. SYMBOL is
9175 a SYMBOL_REF of the function to be called; PRIORITY is a number
9176 between 0 and MAX_INIT_PRIORITY.
9177
9178 Differs from default_ctors_section_asm_out_constructor in that the
9179 width of the .ctors entry is always 64 bits, rather than the 32 bits
9180 used by a normal pointer. */
9181
9182 static void
9183 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9184 {
9185 ctors_section ();
9186 assemble_align (BITS_PER_WORD);
9187 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9188 }
9189
9190 static void
9191 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9192 {
9193 dtors_section ();
9194 assemble_align (BITS_PER_WORD);
9195 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9196 }
9197 #else
9198
9199 rtx
9200 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9201 int is_local ATTRIBUTE_UNUSED)
9202 {
9203 return NULL_RTX;
9204 }
9205
9206 rtx
9207 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9208 tree cfundecl ATTRIBUTE_UNUSED,
9209 int lflag ATTRIBUTE_UNUSED,
9210 int rflag ATTRIBUTE_UNUSED)
9211 {
9212 return NULL_RTX;
9213 }
9214
9215 #endif /* TARGET_ABI_OPEN_VMS */
9216 \f
9217 #if TARGET_ABI_UNICOSMK
9218
9219 /* This evaluates to true if we do not know how to pass TYPE solely in
9220 registers. This is the case for all arguments that do not fit in two
9221 registers. */
9222
9223 static bool
9224 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9225 {
9226 if (type == NULL)
9227 return false;
9228
9229 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9230 return true;
9231 if (TREE_ADDRESSABLE (type))
9232 return true;
9233
9234 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9235 }
9236
9237 /* Define the offset between two registers, one to be eliminated, and the
9238 other its replacement, at the start of a routine. */
9239
9240 int
9241 unicosmk_initial_elimination_offset (int from, int to)
9242 {
9243 int fixed_size;
9244
9245 fixed_size = alpha_sa_size();
9246 if (fixed_size != 0)
9247 fixed_size += 48;
9248
9249 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9250 return -fixed_size;
9251 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9252 return 0;
9253 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9254 return (ALPHA_ROUND (current_function_outgoing_args_size)
9255 + ALPHA_ROUND (get_frame_size()));
9256 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9257 return (ALPHA_ROUND (fixed_size)
9258 + ALPHA_ROUND (get_frame_size()
9259 + current_function_outgoing_args_size));
9260 else
9261 abort ();
9262 }
9263
9264 /* Output the module name for .ident and .end directives. We have to strip
9265 directories and make sure that the module name starts with a letter
9266 or '$'. */
9267
9268 static void
9269 unicosmk_output_module_name (FILE *file)
9270 {
9271 const char *name = lbasename (main_input_filename);
9272 unsigned len = strlen (name);
9273 char *clean_name = alloca (len + 2);
9274 char *ptr = clean_name;
9275
9276 /* CAM only accepts module names that start with a letter or '$'. We
9277 prefix the module name with a '$' if necessary. */
9278
9279 if (!ISALPHA (*name))
9280 *ptr++ = '$';
9281 memcpy (ptr, name, len + 1);
9282 clean_symbol_name (clean_name);
9283 fputs (clean_name, file);
9284 }
9285
9286 /* Output the definition of a common variable. */
9287
9288 void
9289 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9290 {
9291 tree name_tree;
9292 printf ("T3E__: common %s\n", name);
9293
9294 common_section ();
9295 fputs ("\t.endp\n\n\t.psect ", file);
9296 assemble_name (file, name);
9297 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9298 fprintf (file, "\t.byte\t0:%d\n", size);
9299
9300 /* Mark the symbol as defined in this module. */
9301 name_tree = get_identifier (name);
9302 TREE_ASM_WRITTEN (name_tree) = 1;
9303 }
9304
9305 #define SECTION_PUBLIC SECTION_MACH_DEP
9306 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9307 static int current_section_align;
9308
9309 static unsigned int
9310 unicosmk_section_type_flags (tree decl, const char *name,
9311 int reloc ATTRIBUTE_UNUSED)
9312 {
9313 unsigned int flags = default_section_type_flags (decl, name, reloc);
9314
9315 if (!decl)
9316 return flags;
9317
9318 if (TREE_CODE (decl) == FUNCTION_DECL)
9319 {
9320 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9321 if (align_functions_log > current_section_align)
9322 current_section_align = align_functions_log;
9323
9324 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9325 flags |= SECTION_MAIN;
9326 }
9327 else
9328 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9329
9330 if (TREE_PUBLIC (decl))
9331 flags |= SECTION_PUBLIC;
9332
9333 return flags;
9334 }
9335
9336 /* Generate a section name for decl and associate it with the
9337 declaration. */
9338
9339 static void
9340 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9341 {
9342 const char *name;
9343 int len;
9344
9345 if (!decl)
9346 abort ();
9347
9348 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9349 name = default_strip_name_encoding (name);
9350 len = strlen (name);
9351
9352 if (TREE_CODE (decl) == FUNCTION_DECL)
9353 {
9354 char *string;
9355
9356 /* It is essential that we prefix the section name here because
9357 otherwise the section names generated for constructors and
9358 destructors confuse collect2. */
9359
9360 string = alloca (len + 6);
9361 sprintf (string, "code@%s", name);
9362 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9363 }
9364 else if (TREE_PUBLIC (decl))
9365 DECL_SECTION_NAME (decl) = build_string (len, name);
9366 else
9367 {
9368 char *string;
9369
9370 string = alloca (len + 6);
9371 sprintf (string, "data@%s", name);
9372 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9373 }
9374 }
9375
9376 /* Switch to an arbitrary section NAME with attributes as specified
9377 by FLAGS. DECL is the declaration associated with the section, if
9378 any; it is currently unused here. */
9379
9380 static void
9381 unicosmk_asm_named_section (const char *name, unsigned int flags,
9382 tree decl ATTRIBUTE_UNUSED)
9383 {
9384 const char *kind;
9385
9386 /* Close the previous section. */
9387
9388 fputs ("\t.endp\n\n", asm_out_file);
9389
9390 /* Find out what kind of section we are opening. */
9391
9392 if (flags & SECTION_MAIN)
9393 fputs ("\t.start\tmain\n", asm_out_file);
9394
9395 if (flags & SECTION_CODE)
9396 kind = "code";
9397 else if (flags & SECTION_PUBLIC)
9398 kind = "common";
9399 else
9400 kind = "data";
9401
9402 if (current_section_align != 0)
9403 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9404 current_section_align, kind);
9405 else
9406 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9407 }
9408
9409 static void
9410 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9411 {
9412 if (DECL_P (decl)
9413 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9414 unicosmk_unique_section (decl, 0);
9415 }
9416
9417 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9418 in code sections because .align fills unused space with zeroes. */
9419
9420 void
9421 unicosmk_output_align (FILE *file, int align)
9422 {
9423 if (inside_function)
9424 fprintf (file, "\tgcc@code@align\t%d\n", align);
9425 else
9426 fprintf (file, "\t.align\t%d\n", align);
9427 }
9428
9429 /* Add a case vector to the current function's list of deferred case
9430 vectors. Case vectors have to be put into a separate section because CAM
9431 does not allow data definitions in code sections. */
9432
9433 void
9434 unicosmk_defer_case_vector (rtx lab, rtx vec)
9435 {
9436 struct machine_function *machine = cfun->machine;
9437
9438 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
9439 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
9440 machine->addr_list);
9441 }
9442
9443 /* Output a case vector. */
9444
9445 static void
9446 unicosmk_output_addr_vec (FILE *file, rtx vec)
9447 {
9448 rtx lab = XEXP (vec, 0);
9449 rtx body = XEXP (vec, 1);
9450 int vlen = XVECLEN (body, 0);
9451 int idx;
9452
9453 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
9454
9455 for (idx = 0; idx < vlen; idx++)
9456 {
9457 ASM_OUTPUT_ADDR_VEC_ELT
9458 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
9459 }
9460 }
9461
9462 /* Output current function's deferred case vectors. */
9463
9464 static void
9465 unicosmk_output_deferred_case_vectors (FILE *file)
9466 {
9467 struct machine_function *machine = cfun->machine;
9468 rtx t;
9469
9470 if (machine->addr_list == NULL_RTX)
9471 return;
9472
9473 data_section ();
9474 for (t = machine->addr_list; t; t = XEXP (t, 1))
9475 unicosmk_output_addr_vec (file, XEXP (t, 0));
9476 }
9477
9478 /* Generate the name of the SSIB section for the current function. */
9479
9480 #define SSIB_PREFIX "__SSIB_"
9481 #define SSIB_PREFIX_LEN 7
9482
9483 static const char *
9484 unicosmk_ssib_name (void)
9485 {
9486 /* This is ok since CAM won't be able to deal with names longer than that
9487 anyway. */
9488
9489 static char name[256];
9490
9491 rtx x;
9492 const char *fnname;
9493 int len;
9494
9495 x = DECL_RTL (cfun->decl);
9496 if (GET_CODE (x) != MEM)
9497 abort ();
9498 x = XEXP (x, 0);
9499 if (GET_CODE (x) != SYMBOL_REF)
9500 abort ();
9501 fnname = XSTR (x, 0);
9502
9503 len = strlen (fnname);
9504 if (len + SSIB_PREFIX_LEN > 255)
9505 len = 255 - SSIB_PREFIX_LEN;
9506
9507 strcpy (name, SSIB_PREFIX);
9508 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
9509 name[len + SSIB_PREFIX_LEN] = 0;
9510
9511 return name;
9512 }
9513
9514 /* Set up the dynamic subprogram information block (DSIB) and update the
9515 frame pointer register ($15) for subroutines which have a frame. If the
9516 subroutine doesn't have a frame, simply increment $15. */
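/* Roughly, and derived only from the stores below: after the 64-byte
   adjustment the new $15 points just above the DSIB, which then holds
   the return address at $15-8, the caller's frame pointer at $15-16,
   the SSIB pointer at $15-32 and the CIW index at $15-40; the epilogue
   reads the first two back from the same offsets.  */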
9517
9518 static void
9519 unicosmk_gen_dsib (unsigned long *imaskP)
9520 {
9521 if (alpha_procedure_type == PT_STACK)
9522 {
9523 const char *ssib_name;
9524 rtx mem;
9525
9526 /* Allocate 64 bytes for the DSIB. */
9527
9528 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
9529 GEN_INT (-64))));
9530 emit_insn (gen_blockage ());
9531
9532 /* Save the return address. */
9533
9534 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
9535 set_mem_alias_set (mem, alpha_sr_alias_set);
9536 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
9537 (*imaskP) &= ~(1UL << REG_RA);
9538
9539 /* Save the old frame pointer. */
9540
9541 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
9542 set_mem_alias_set (mem, alpha_sr_alias_set);
9543 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
9544 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
9545
9546 emit_insn (gen_blockage ());
9547
9548 /* Store the SSIB pointer. */
9549
9550 ssib_name = ggc_strdup (unicosmk_ssib_name ());
9551 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
9552 set_mem_alias_set (mem, alpha_sr_alias_set);
9553
9554 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
9555 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
9556 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
9557
9558 /* Save the CIW index. */
9559
9560 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
9561 set_mem_alias_set (mem, alpha_sr_alias_set);
9562 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
9563
9564 emit_insn (gen_blockage ());
9565
9566 /* Set the new frame pointer. */
9567
9568 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9569 stack_pointer_rtx, GEN_INT (64))));
9570
9571 }
9572 else
9573 {
9574 /* Increment the frame pointer register to indicate that we do not
9575 have a frame. */
9576
9577 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
9578 hard_frame_pointer_rtx, const1_rtx)));
9579 }
9580 }
9581
9582 /* Output the static subroutine information block for the current
9583 function. */
9584
9585 static void
9586 unicosmk_output_ssib (FILE *file, const char *fnname)
9587 {
9588 int len;
9589 int i;
9590 rtx x;
9591 rtx ciw;
9592 struct machine_function *machine = cfun->machine;
9593
9594 ssib_section ();
9595 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
9596 unicosmk_ssib_name ());
9597
9598 /* Some required stuff and the function name length. */
9599
9600 len = strlen (fnname);
9601 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
9602
9603 /* Saved registers
9604 ??? We don't do that yet. */
9605
9606 fputs ("\t.quad\t0\n", file);
9607
9608 /* Function address. */
9609
9610 fputs ("\t.quad\t", file);
9611 assemble_name (file, fnname);
9612 putc ('\n', file);
9613
9614 fputs ("\t.quad\t0\n", file);
9615 fputs ("\t.quad\t0\n", file);
9616
9617 /* Function name.
9618 ??? We do it the same way Cray CC does it but this could be
9619 simplified. */
9620
9621 for (i = 0; i < len; i++)
9622 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
9623 if ((len % 8) == 0)
9624 fputs ("\t.quad\t0\n", file);
9625 else
9626 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
9627
9628 /* All call information words used in the function. */
9629
9630 for (x = machine->first_ciw; x; x = XEXP (x, 1))
9631 {
9632 ciw = XEXP (x, 0);
9633 #if HOST_BITS_PER_WIDE_INT == 32
9634 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
9635 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
9636 #else
9637 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
9638 #endif
9639 }
9640 }
9641
9642 /* Add a call information word (CIW) to the list of the current function's
9643 CIWs and return its index.
9644
9645 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
9646
9647 rtx
9648 unicosmk_add_call_info_word (rtx x)
9649 {
9650 rtx node;
9651 struct machine_function *machine = cfun->machine;
9652
9653 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
9654 if (machine->first_ciw == NULL_RTX)
9655 machine->first_ciw = node;
9656 else
9657 XEXP (machine->last_ciw, 1) = node;
9658
9659 machine->last_ciw = node;
9660 ++machine->ciw_count;
9661
9662 return GEN_INT (machine->ciw_count
9663 + strlen (current_function_name ())/8 + 5);
9664 }
9665
9666 static char unicosmk_section_buf[100];
9667
9668 char *
9669 unicosmk_text_section (void)
9670 {
9671 static int count = 0;
9672 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@text___%d,code",
9673 count++);
9674 return unicosmk_section_buf;
9675 }
9676
9677 char *
9678 unicosmk_data_section (void)
9679 {
9680 static int count = 1;
9681 sprintf (unicosmk_section_buf, "\t.endp\n\n\t.psect\tgcc@data___%d,data",
9682 count++);
9683 return unicosmk_section_buf;
9684 }
9685
9686 /* The Cray assembler doesn't accept extern declarations for symbols which
9687 are defined in the same file. We have to keep track of all global
9688 symbols which are referenced and/or defined in a source file and output
9689 extern declarations for those which are referenced but not defined at
9690 the end of file. */
9691
9692 /* List of identifiers for which an extern declaration might have to be
9693 emitted. */
9694 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9695
9696 struct unicosmk_extern_list
9697 {
9698 struct unicosmk_extern_list *next;
9699 const char *name;
9700 };
9701
9702 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
9703
9704 /* Output extern declarations which are required for every asm file. */
9705
9706 static void
9707 unicosmk_output_default_externs (FILE *file)
9708 {
9709 static const char *const externs[] =
9710 { "__T3E_MISMATCH" };
9711
9712 int i;
9713 int n;
9714
9715 n = ARRAY_SIZE (externs);
9716
9717 for (i = 0; i < n; i++)
9718 fprintf (file, "\t.extern\t%s\n", externs[i]);
9719 }
9720
9721 /* Output extern declarations for global symbols which have been
9722 referenced but not defined. */
9723
9724 static void
9725 unicosmk_output_externs (FILE *file)
9726 {
9727 struct unicosmk_extern_list *p;
9728 const char *real_name;
9729 int len;
9730 tree name_tree;
9731
9732 len = strlen (user_label_prefix);
9733 for (p = unicosmk_extern_head; p != 0; p = p->next)
9734 {
9735 /* We have to strip the encoding and possibly remove user_label_prefix
9736 from the identifier in order to handle -fleading-underscore and
9737 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
9738 real_name = default_strip_name_encoding (p->name);
9739 if (len && p->name[0] == '*'
9740 && !memcmp (real_name, user_label_prefix, len))
9741 real_name += len;
9742
9743 name_tree = get_identifier (real_name);
9744 if (! TREE_ASM_WRITTEN (name_tree))
9745 {
9746 TREE_ASM_WRITTEN (name_tree) = 1;
9747 fputs ("\t.extern\t", file);
9748 assemble_name (file, p->name);
9749 putc ('\n', file);
9750 }
9751 }
9752 }
9753
9754 /* Record an extern. */
9755
9756 void
9757 unicosmk_add_extern (const char *name)
9758 {
9759 struct unicosmk_extern_list *p;
9760
9761 p = (struct unicosmk_extern_list *)
9762 xmalloc (sizeof (struct unicosmk_extern_list));
9763 p->next = unicosmk_extern_head;
9764 p->name = name;
9765 unicosmk_extern_head = p;
9766 }
9767
9768 /* The Cray assembler generates incorrect code if identifiers which
9769 conflict with register names are used as instruction operands. We have
9770 to replace such identifiers with DEX expressions. */
9771
9772 /* Structure to collect identifiers which have been replaced by DEX
9773 expressions. */
9774 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
9775
9776 struct unicosmk_dex {
9777 struct unicosmk_dex *next;
9778 const char *name;
9779 };
9780
9781 /* List of identifiers which have been replaced by DEX expressions. The DEX
9782 number is determined by the position in the list. */
9783
9784 static struct unicosmk_dex *unicosmk_dex_list = NULL;
9785
9786 /* The number of elements in the DEX list. */
9787
9788 static int unicosmk_dex_count = 0;
9789
9790 /* Check if NAME must be replaced by a DEX expression. */
9791
9792 static int
9793 unicosmk_special_name (const char *name)
9794 {
9795 if (name[0] == '*')
9796 ++name;
9797
9798 if (name[0] == '$')
9799 ++name;
9800
9801 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
9802 return 0;
9803
9804 switch (name[1])
9805 {
9806 case '1': case '2':
9807 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
9808
9809 case '3':
9810 return (name[2] == '\0'
9811 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
9812
9813 default:
9814 return (ISDIGIT (name[1]) && name[2] == '\0');
9815 }
9816 }
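
/* Illustrative cases for the test above: "r31", "$f10" and "*R5" are
   treated as special names (they parse as register names 0..31 after the
   optional '*' and '$' prefixes), whereas "r32", "f310" and "foo" are
   not.  */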
9817
9818 /* Return the DEX number if X must be replaced by a DEX expression and 0
9819 otherwise. */
9820
9821 static int
9822 unicosmk_need_dex (rtx x)
9823 {
9824 struct unicosmk_dex *dex;
9825 const char *name;
9826 int i;
9827
9828 if (GET_CODE (x) != SYMBOL_REF)
9829 return 0;
9830
9831 name = XSTR (x,0);
9832 if (! unicosmk_special_name (name))
9833 return 0;
9834
9835 i = unicosmk_dex_count;
9836 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9837 {
9838 if (! strcmp (name, dex->name))
9839 return i;
9840 --i;
9841 }
9842
9843 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
9844 dex->name = name;
9845 dex->next = unicosmk_dex_list;
9846 unicosmk_dex_list = dex;
9847
9848 ++unicosmk_dex_count;
9849 return unicosmk_dex_count;
9850 }
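
/* Note that the numbering produced by the list handling above is stable:
   each distinct special name gets a DEX number equal to its position in
   order of first use, starting at 1.  */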
9851
9852 /* Output the DEX definitions for this file. */
9853
9854 static void
9855 unicosmk_output_dex (FILE *file)
9856 {
9857 struct unicosmk_dex *dex;
9858 int i;
9859
9860 if (unicosmk_dex_list == NULL)
9861 return;
9862
9863 fprintf (file, "\t.dexstart\n");
9864
9865 i = unicosmk_dex_count;
9866 for (dex = unicosmk_dex_list; dex; dex = dex->next)
9867 {
9868 fprintf (file, "\tDEX (%d) = ", i);
9869 assemble_name (file, dex->name);
9870 putc ('\n', file);
9871 --i;
9872 }
9873
9874 fprintf (file, "\t.dexend\n");
9875 }
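
/* For instance, if "f10" was recorded first and "r2" second, the loop
   above emits (illustrative):

	.dexstart
	DEX (2) = r2
	DEX (1) = f10
	.dexend  */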
9876
9877 /* Output text to appear at the beginning of an assembler file. */
9878
9879 static void
9880 unicosmk_file_start (void)
9881 {
9882 int i;
9883
9884 fputs ("\t.ident\t", asm_out_file);
9885 unicosmk_output_module_name (asm_out_file);
9886 fputs ("\n\n", asm_out_file);
9887
9888 /* The Unicos/Mk assembler uses different register names. Instead of trying
9889 to support them, we simply use micro definitions. */
9890
9891 /* CAM has different register names: rN for the integer register N and fN
9892 for the floating-point register N. Instead of trying to use these in
9893 alpha.md, we define the symbols $N and $fN to refer to the appropriate
9894 register. */
9895
9896 for (i = 0; i < 32; ++i)
9897 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
9898
9899 for (i = 0; i < 32; ++i)
9900 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
9901
9902 putc ('\n', asm_out_file);
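
/* The two loops above produce one micro definition per register in the
   assembler output, i.e. "$0 <- r0" through "$31 <- r31" followed by
   "$f0 <- f0" through "$f31 <- f31".  */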
9903
9904 /* The .align directive fills unused space with zeroes, which does not work
9905 in code sections. We define the macro 'gcc@code@align' which uses nops
9906 instead. Note that it assumes that code sections always have the
9907 biggest possible alignment since . refers to the current offset from
9908 the beginning of the section. */
9909
9910 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
9911 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
9912 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
9913 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
9914 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
9915 fputs ("\tbis r31,r31,r31\n", asm_out_file);
9916 fputs ("\t.endr\n", asm_out_file);
9917 fputs ("\t.endif\n", asm_out_file);
9918 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
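
/* As emitted above, an invocation such as "gcc@code@align 3" pads the
   current code psect with "bis r31,r31,r31" no-ops (4 bytes each) until
   the offset within the section is a multiple of 1 << 3 = 8 bytes.  */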
9919
9920 /* Output extern declarations which should always be visible. */
9921 unicosmk_output_default_externs (asm_out_file);
9922
9923 /* Open a dummy section. We always need to be inside a section for the
9924 section-switching code to work correctly.
9925 ??? This should be a module id or something like that. I still have to
9926 figure out what the rules for those are. */
9927 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
9928 }
9929
9930 /* Output text to appear at the end of an assembler file. This includes all
9931 pending extern declarations and DEX expressions. */
9932
9933 static void
9934 unicosmk_file_end (void)
9935 {
9936 fputs ("\t.endp\n\n", asm_out_file);
9937
9938 /* Output all pending externs. */
9939
9940 unicosmk_output_externs (asm_out_file);
9941
9942 /* Output DEX definitions used for functions whose names conflict with
9943 register names. */
9944
9945 unicosmk_output_dex (asm_out_file);
9946
9947 fputs ("\t.end\t", asm_out_file);
9948 unicosmk_output_module_name (asm_out_file);
9949 putc ('\n', asm_out_file);
9950 }
9951
9952 #else
9953
9954 static void
9955 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
9956 {}
9957
9958 static void
9959 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
9960 {}
9961
9962 static void
9963 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
9964 const char * fnname ATTRIBUTE_UNUSED)
9965 {}
9966
9967 rtx
9968 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
9969 {
9970 return NULL_RTX;
9971 }
9972
9973 static int
9974 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
9975 {
9976 return 0;
9977 }
9978
9979 #endif /* TARGET_ABI_UNICOSMK */
9980
9981 static void
9982 alpha_init_libfuncs (void)
9983 {
9984 if (TARGET_ABI_UNICOSMK)
9985 {
9986 /* Prevent gcc from generating calls to __divsi3. */
9987 set_optab_libfunc (sdiv_optab, SImode, 0);
9988 set_optab_libfunc (udiv_optab, SImode, 0);
9989
9990 /* Use the functions provided by the system library
9991 for DImode integer division. */
9992 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
9993 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
9994 }
9995 else if (TARGET_ABI_OPEN_VMS)
9996 {
9997 /* Use the VMS runtime library functions for division and
9998 remainder. */
9999 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10000 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10001 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10002 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10003 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10004 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10005 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10006 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10007 }
10008 }
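
/* With the overrides above, a DImode division that is not expanded inline
   is emitted as a call to the system routines $sldiv/$uldiv on Unicos/Mk
   (or the OTS$ routines on VMS) instead of the default libgcc functions.  */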
10009
10010 \f
10011 /* Initialize the GCC target structure. */
10012 #if TARGET_ABI_OPEN_VMS
10013 # undef TARGET_ATTRIBUTE_TABLE
10014 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10015 # undef TARGET_SECTION_TYPE_FLAGS
10016 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10017 #endif
10018
10019 #undef TARGET_IN_SMALL_DATA_P
10020 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10021
10022 #if TARGET_ABI_UNICOSMK
10023 # undef TARGET_INSERT_ATTRIBUTES
10024 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10025 # undef TARGET_SECTION_TYPE_FLAGS
10026 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10027 # undef TARGET_ASM_UNIQUE_SECTION
10028 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10029 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10030 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10031 # undef TARGET_ASM_GLOBALIZE_LABEL
10032 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10033 # undef TARGET_MUST_PASS_IN_STACK
10034 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10035 #endif
10036
10037 #undef TARGET_ASM_ALIGNED_HI_OP
10038 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10039 #undef TARGET_ASM_ALIGNED_DI_OP
10040 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10041
10042 /* Default unaligned ops are provided for ELF systems. To get unaligned
10043 data for non-ELF systems, we have to turn off auto alignment. */
10044 #ifndef OBJECT_FORMAT_ELF
10045 #undef TARGET_ASM_UNALIGNED_HI_OP
10046 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10047 #undef TARGET_ASM_UNALIGNED_SI_OP
10048 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10049 #undef TARGET_ASM_UNALIGNED_DI_OP
10050 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10051 #endif
10052
10053 #ifdef OBJECT_FORMAT_ELF
10054 #undef TARGET_ASM_SELECT_RTX_SECTION
10055 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10056 #endif
10057
10058 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10059 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10060
10061 #undef TARGET_INIT_LIBFUNCS
10062 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10063
10064 #if TARGET_ABI_UNICOSMK
10065 #undef TARGET_ASM_FILE_START
10066 #define TARGET_ASM_FILE_START unicosmk_file_start
10067 #undef TARGET_ASM_FILE_END
10068 #define TARGET_ASM_FILE_END unicosmk_file_end
10069 #else
10070 #undef TARGET_ASM_FILE_START
10071 #define TARGET_ASM_FILE_START alpha_file_start
10072 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10073 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10074 #endif
10075
10076 #undef TARGET_SCHED_ADJUST_COST
10077 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10078 #undef TARGET_SCHED_ISSUE_RATE
10079 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10080 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10081 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10082 alpha_multipass_dfa_lookahead
10083
10084 #undef TARGET_HAVE_TLS
10085 #define TARGET_HAVE_TLS HAVE_AS_TLS
10086
10087 #undef TARGET_INIT_BUILTINS
10088 #define TARGET_INIT_BUILTINS alpha_init_builtins
10089 #undef TARGET_EXPAND_BUILTIN
10090 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10091 #undef TARGET_FOLD_BUILTIN
10092 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10093
10094 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10095 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10096 #undef TARGET_CANNOT_COPY_INSN_P
10097 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10098 #undef TARGET_CANNOT_FORCE_CONST_MEM
10099 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10100
10101 #if TARGET_ABI_OSF
10102 #undef TARGET_ASM_OUTPUT_MI_THUNK
10103 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10104 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10105 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10106 #endif
10107
10108 #undef TARGET_RTX_COSTS
10109 #define TARGET_RTX_COSTS alpha_rtx_costs
10110 #undef TARGET_ADDRESS_COST
10111 #define TARGET_ADDRESS_COST hook_int_rtx_0
10112
10113 #undef TARGET_MACHINE_DEPENDENT_REORG
10114 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10115
10116 #undef TARGET_PROMOTE_FUNCTION_ARGS
10117 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10118 #undef TARGET_PROMOTE_FUNCTION_RETURN
10119 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10120 #undef TARGET_PROMOTE_PROTOTYPES
10121 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10122 #undef TARGET_RETURN_IN_MEMORY
10123 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10124 #undef TARGET_PASS_BY_REFERENCE
10125 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10126 #undef TARGET_SETUP_INCOMING_VARARGS
10127 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10128 #undef TARGET_STRICT_ARGUMENT_NAMING
10129 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10130 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10131 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10132 #undef TARGET_SPLIT_COMPLEX_ARG
10133 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10134 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10135 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10136 #undef TARGET_ARG_PARTIAL_BYTES
10137 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10138
10139 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10140 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10141 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10142 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10143
10144 #undef TARGET_BUILD_BUILTIN_VA_LIST
10145 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10146
10147 /* The Alpha architecture does not require sequential consistency. See
10148 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10149 for an example of how it can be violated in practice. */
10150 #undef TARGET_RELAXED_ORDERING
10151 #define TARGET_RELAXED_ORDERING true
10152
10153 struct gcc_target targetm = TARGET_INITIALIZER;
10154
10155 \f
10156 #include "gt-alpha.h"