1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60 #include "libfuncs.h"
61
62 /* Specify which cpu to schedule for. */
63 enum processor_type alpha_tune;
64
65 /* Which cpu we're generating code for. */
66 enum processor_type alpha_cpu;
67
68 static const char * const alpha_cpu_name[] =
69 {
70 "ev4", "ev5", "ev6"
71 };
72
73 /* Specify how accurate floating-point traps need to be. */
74
75 enum alpha_trap_precision alpha_tp;
76
77 /* Specify the floating-point rounding mode. */
78
79 enum alpha_fp_rounding_mode alpha_fprm;
80
81 /* Specify which things cause traps. */
82
83 enum alpha_fp_trap_mode alpha_fptm;
84
85 /* Nonzero if we are inside a function, because the Alpha assembler can't
86 handle .file directives inside functions. */
87
88 static int inside_function = FALSE;
89
90 /* The number of cycles of latency we should assume on memory reads. */
91
92 int alpha_memory_latency = 3;
93
94 /* Whether the function needs the GP. */
95
96 static int alpha_function_needs_gp;
97
98 /* The alias set for prologue/epilogue register save/restore. */
99
100 static GTY(()) alias_set_type alpha_sr_alias_set;
101
102 /* The assembler name of the current function. */
103
104 static const char *alpha_fnname;
105
106 /* The next explicit relocation sequence number. */
107 extern GTY(()) int alpha_next_sequence_number;
108 int alpha_next_sequence_number = 1;
109
110 /* The literal and gpdisp sequence numbers for this insn, as printed
111 by %# and %* respectively. */
112 extern GTY(()) int alpha_this_literal_sequence_number;
113 extern GTY(()) int alpha_this_gpdisp_sequence_number;
114 int alpha_this_literal_sequence_number;
115 int alpha_this_gpdisp_sequence_number;
116
117 /* Costs of various operations on the different architectures. */
118
119 struct alpha_rtx_cost_data
120 {
121 unsigned char fp_add;
122 unsigned char fp_mult;
123 unsigned char fp_div_sf;
124 unsigned char fp_div_df;
125 unsigned char int_mult_si;
126 unsigned char int_mult_di;
127 unsigned char int_shift;
128 unsigned char int_cmov;
129 unsigned short int_div;
130 };
131
132 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
133 {
134 { /* EV4 */
135 COSTS_N_INSNS (6), /* fp_add */
136 COSTS_N_INSNS (6), /* fp_mult */
137 COSTS_N_INSNS (34), /* fp_div_sf */
138 COSTS_N_INSNS (63), /* fp_div_df */
139 COSTS_N_INSNS (23), /* int_mult_si */
140 COSTS_N_INSNS (23), /* int_mult_di */
141 COSTS_N_INSNS (2), /* int_shift */
142 COSTS_N_INSNS (2), /* int_cmov */
143 COSTS_N_INSNS (97), /* int_div */
144 },
145 { /* EV5 */
146 COSTS_N_INSNS (4), /* fp_add */
147 COSTS_N_INSNS (4), /* fp_mult */
148 COSTS_N_INSNS (15), /* fp_div_sf */
149 COSTS_N_INSNS (22), /* fp_div_df */
150 COSTS_N_INSNS (8), /* int_mult_si */
151 COSTS_N_INSNS (12), /* int_mult_di */
152 COSTS_N_INSNS (1) + 1, /* int_shift */
153 COSTS_N_INSNS (1), /* int_cmov */
154 COSTS_N_INSNS (83), /* int_div */
155 },
156 { /* EV6 */
157 COSTS_N_INSNS (4), /* fp_add */
158 COSTS_N_INSNS (4), /* fp_mult */
159 COSTS_N_INSNS (12), /* fp_div_sf */
160 COSTS_N_INSNS (15), /* fp_div_df */
161 COSTS_N_INSNS (7), /* int_mult_si */
162 COSTS_N_INSNS (7), /* int_mult_di */
163 COSTS_N_INSNS (1), /* int_shift */
164 COSTS_N_INSNS (2), /* int_cmov */
165 COSTS_N_INSNS (86), /* int_div */
166 },
167 };
168
169 /* Similar but tuned for code size instead of execution latency. The
170 extra +N is fractional cost tuning based on latency. It's used to
171 encourage use of cheaper insns like shift, but only if there's just
172 one of them. */
173
174 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
175 {
176 COSTS_N_INSNS (1), /* fp_add */
177 COSTS_N_INSNS (1), /* fp_mult */
178 COSTS_N_INSNS (1), /* fp_div_sf */
179 COSTS_N_INSNS (1) + 1, /* fp_div_df */
180 COSTS_N_INSNS (1) + 1, /* int_mult_si */
181 COSTS_N_INSNS (1) + 2, /* int_mult_di */
182 COSTS_N_INSNS (1), /* int_shift */
183 COSTS_N_INSNS (1), /* int_cmov */
184 COSTS_N_INSNS (6), /* int_div */
185 };
186
187 /* Get the number of args of a function in one of two ways. */
188 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
189 #define NUM_ARGS crtl->args.info.num_args
190 #else
191 #define NUM_ARGS crtl->args.info
192 #endif
193
194 #define REG_PV 27
195 #define REG_RA 26
196
197 /* Declarations of static functions. */
198 static struct machine_function *alpha_init_machine_status (void);
199 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
200
201 #if TARGET_ABI_OPEN_VMS
202 static void alpha_write_linkage (FILE *, const char *, tree);
203 static bool vms_valid_pointer_mode (enum machine_mode);
204 #endif
205
206 static void unicosmk_output_deferred_case_vectors (FILE *);
207 static void unicosmk_gen_dsib (unsigned long *);
208 static void unicosmk_output_ssib (FILE *, const char *);
209 static int unicosmk_need_dex (rtx);
210 \f
211 /* Implement TARGET_HANDLE_OPTION. */
212
213 static bool
214 alpha_handle_option (size_t code, const char *arg, int value)
215 {
216 switch (code)
217 {
218 case OPT_mfp_regs:
219 if (value == 0)
220 target_flags |= MASK_SOFT_FP;
221 break;
222
223 case OPT_mieee:
224 case OPT_mieee_with_inexact:
225 target_flags |= MASK_IEEE_CONFORMANT;
226 break;
227
228 case OPT_mtls_size_:
229 if (value != 16 && value != 32 && value != 64)
230 error ("bad value %qs for -mtls-size switch", arg);
231 break;
232 }
233
234 return true;
235 }
236
237 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
238 /* Implement TARGET_MANGLE_TYPE. */
239
240 static const char *
241 alpha_mangle_type (const_tree type)
242 {
243 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
244 && TARGET_LONG_DOUBLE_128)
245 return "g";
246
247 /* For all other types, use normal C++ mangling. */
248 return NULL;
249 }
250 #endif
251
252 /* Parse target option strings. */
253
254 static void
255 alpha_option_override (void)
256 {
257 static const struct cpu_table {
258 const char *const name;
259 const enum processor_type processor;
260 const int flags;
261 } cpu_table[] = {
262 { "ev4", PROCESSOR_EV4, 0 },
263 { "ev45", PROCESSOR_EV4, 0 },
264 { "21064", PROCESSOR_EV4, 0 },
265 { "ev5", PROCESSOR_EV5, 0 },
266 { "21164", PROCESSOR_EV5, 0 },
267 { "ev56", PROCESSOR_EV5, MASK_BWX },
268 { "21164a", PROCESSOR_EV5, MASK_BWX },
269 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
270 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
273 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
275 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
276 };
277
278 int const ct_size = ARRAY_SIZE (cpu_table);
279 int i;
280
281 #ifdef SUBTARGET_OVERRIDE_OPTIONS
282 SUBTARGET_OVERRIDE_OPTIONS;
283 #endif
284
285 /* Unicos/Mk doesn't have shared libraries. */
286 if (TARGET_ABI_UNICOSMK && flag_pic)
287 {
288 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
289 (flag_pic > 1) ? "PIC" : "pic");
290 flag_pic = 0;
291 }
292
293 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
294 floating-point instructions. Make that the default for this target. */
295 if (TARGET_ABI_UNICOSMK)
296 alpha_fprm = ALPHA_FPRM_DYN;
297 else
298 alpha_fprm = ALPHA_FPRM_NORM;
299
300 alpha_tp = ALPHA_TP_PROG;
301 alpha_fptm = ALPHA_FPTM_N;
302
303 /* We cannot use su and sui qualifiers for conversion instructions on
304 Unicos/Mk. I'm not sure if this is due to assembler or hardware
305 limitations. Right now, we issue a warning if -mieee is specified
306 and then ignore it; eventually, we should either get it right or
307 disable the option altogether. */
308
309 if (TARGET_IEEE)
310 {
311 if (TARGET_ABI_UNICOSMK)
312 warning (0, "-mieee not supported on Unicos/Mk");
313 else
314 {
315 alpha_tp = ALPHA_TP_INSN;
316 alpha_fptm = ALPHA_FPTM_SU;
317 }
318 }
319
320 if (TARGET_IEEE_WITH_INEXACT)
321 {
322 if (TARGET_ABI_UNICOSMK)
323 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
324 else
325 {
326 alpha_tp = ALPHA_TP_INSN;
327 alpha_fptm = ALPHA_FPTM_SUI;
328 }
329 }
330
331 if (alpha_tp_string)
332 {
333 if (! strcmp (alpha_tp_string, "p"))
334 alpha_tp = ALPHA_TP_PROG;
335 else if (! strcmp (alpha_tp_string, "f"))
336 alpha_tp = ALPHA_TP_FUNC;
337 else if (! strcmp (alpha_tp_string, "i"))
338 alpha_tp = ALPHA_TP_INSN;
339 else
340 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
341 }
342
343 if (alpha_fprm_string)
344 {
345 if (! strcmp (alpha_fprm_string, "n"))
346 alpha_fprm = ALPHA_FPRM_NORM;
347 else if (! strcmp (alpha_fprm_string, "m"))
348 alpha_fprm = ALPHA_FPRM_MINF;
349 else if (! strcmp (alpha_fprm_string, "c"))
350 alpha_fprm = ALPHA_FPRM_CHOP;
351 else if (! strcmp (alpha_fprm_string,"d"))
352 alpha_fprm = ALPHA_FPRM_DYN;
353 else
354 error ("bad value %qs for -mfp-rounding-mode switch",
355 alpha_fprm_string);
356 }
357
358 if (alpha_fptm_string)
359 {
360 if (strcmp (alpha_fptm_string, "n") == 0)
361 alpha_fptm = ALPHA_FPTM_N;
362 else if (strcmp (alpha_fptm_string, "u") == 0)
363 alpha_fptm = ALPHA_FPTM_U;
364 else if (strcmp (alpha_fptm_string, "su") == 0)
365 alpha_fptm = ALPHA_FPTM_SU;
366 else if (strcmp (alpha_fptm_string, "sui") == 0)
367 alpha_fptm = ALPHA_FPTM_SUI;
368 else
369 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
370 }
371
372 if (alpha_cpu_string)
373 {
374 for (i = 0; i < ct_size; i++)
375 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
376 {
377 alpha_tune = alpha_cpu = cpu_table [i].processor;
378 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
379 target_flags |= cpu_table [i].flags;
380 break;
381 }
382 if (i == ct_size)
383 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
384 }
385
386 if (alpha_tune_string)
387 {
388 for (i = 0; i < ct_size; i++)
389 if (! strcmp (alpha_tune_string, cpu_table [i].name))
390 {
391 alpha_tune = cpu_table [i].processor;
392 break;
393 }
394 if (i == ct_size)
395 error ("bad value %qs for -mtune switch", alpha_tune_string);
396 }
397
398 /* Do some sanity checks on the above options. */
399
400 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
401 {
402 warning (0, "trap mode not supported on Unicos/Mk");
403 alpha_fptm = ALPHA_FPTM_N;
404 }
405
406 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
407 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
408 {
409 warning (0, "fp software completion requires -mtrap-precision=i");
410 alpha_tp = ALPHA_TP_INSN;
411 }
412
413 if (alpha_cpu == PROCESSOR_EV6)
414 {
415 /* Except for EV6 pass 1 (not released), we always have precise
416 arithmetic traps, which means we can do software completion
417 without minding trap shadows. */
418 alpha_tp = ALPHA_TP_PROG;
419 }
420
421 if (TARGET_FLOAT_VAX)
422 {
423 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
424 {
425 warning (0, "rounding mode not supported for VAX floats");
426 alpha_fprm = ALPHA_FPRM_NORM;
427 }
428 if (alpha_fptm == ALPHA_FPTM_SUI)
429 {
430 warning (0, "trap mode not supported for VAX floats");
431 alpha_fptm = ALPHA_FPTM_SU;
432 }
433 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
434 warning (0, "128-bit long double not supported for VAX floats");
435 target_flags &= ~MASK_LONG_DOUBLE_128;
436 }
437
438 {
439 char *end;
440 int lat;
441
442 if (!alpha_mlat_string)
443 alpha_mlat_string = "L1";
444
445 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
446 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
447 ;
448 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
449 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
450 && alpha_mlat_string[2] == '\0')
451 {
452 static int const cache_latency[][4] =
453 {
454 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
455 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
456 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
457 };
458
459 lat = alpha_mlat_string[1] - '0';
460 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
461 {
462 warning (0, "L%d cache latency unknown for %s",
463 lat, alpha_cpu_name[alpha_tune]);
464 lat = 3;
465 }
466 else
467 lat = cache_latency[alpha_tune][lat-1];
468 }
469 else if (! strcmp (alpha_mlat_string, "main"))
470 {
471 /* Most current memories have about 370ns latency. This is
472 a reasonable guess for a fast cpu. */
473 lat = 150;
474 }
475 else
476 {
477 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
478 lat = 3;
479 }
480
481 alpha_memory_latency = lat;
482 }
483
484 /* Default the definition of "small data" to 8 bytes. */
485 if (!global_options_set.x_g_switch_value)
486 g_switch_value = 8;
487
488 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
489 if (flag_pic == 1)
490 target_flags |= MASK_SMALL_DATA;
491 else if (flag_pic == 2)
492 target_flags &= ~MASK_SMALL_DATA;
493
494 /* Align labels and loops for optimal branching. */
495 /* ??? Kludge these by not doing anything if we don't optimize and also if
496 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
497 if (optimize > 0 && write_symbols != SDB_DEBUG)
498 {
499 if (align_loops <= 0)
500 align_loops = 16;
501 if (align_jumps <= 0)
502 align_jumps = 16;
503 }
504 if (align_functions <= 0)
505 align_functions = 16;
506
507 /* Acquire a unique set number for our register saves and restores. */
508 alpha_sr_alias_set = new_alias_set ();
509
510 /* Register variables and functions with the garbage collector. */
511
512 /* Set up function hooks. */
513 init_machine_status = alpha_init_machine_status;
514
515 /* Tell the compiler when we're using VAX floating point. */
516 if (TARGET_FLOAT_VAX)
517 {
518 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
519 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
520 REAL_MODE_FORMAT (TFmode) = NULL;
521 }
522
523 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
524 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
525 target_flags |= MASK_LONG_DOUBLE_128;
526 #endif
527
528 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
529 can be optimized to ap = __builtin_next_arg (0). */
530 if (TARGET_ABI_UNICOSMK)
531 targetm.expand_builtin_va_start = NULL;
532 }
533 \f
534 /* Returns 1 if VALUE is a mask whose bytes are each either all zeros or all ones. */
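/* For example, on a 64-bit host zap_mask (0x00000000ffffff00) is 1, since
   every byte is either 0x00 or 0xff, while zap_mask (0x0000000000000180)
   is 0 because the low byte 0x80 mixes zero and one bits. */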
535
536 int
537 zap_mask (HOST_WIDE_INT value)
538 {
539 int i;
540
541 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
542 i++, value >>= 8)
543 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
544 return 0;
545
546 return 1;
547 }
548
549 /* Return true if OP is valid for a particular TLS relocation.
550 We are already guaranteed that OP is a CONST. */
551
552 int
553 tls_symbolic_operand_1 (rtx op, int size, int unspec)
554 {
555 op = XEXP (op, 0);
556
557 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
558 return 0;
559 op = XVECEXP (op, 0, 0);
560
561 if (GET_CODE (op) != SYMBOL_REF)
562 return 0;
563
564 switch (SYMBOL_REF_TLS_MODEL (op))
565 {
566 case TLS_MODEL_LOCAL_DYNAMIC:
567 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
568 case TLS_MODEL_INITIAL_EXEC:
569 return unspec == UNSPEC_TPREL && size == 64;
570 case TLS_MODEL_LOCAL_EXEC:
571 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
572 default:
573 gcc_unreachable ();
574 }
575 }
576
577 /* Used by aligned_memory_operand and unaligned_memory_operand to
578 resolve what reload is going to do with OP if it's a register. */
579
580 rtx
581 resolve_reload_operand (rtx op)
582 {
583 if (reload_in_progress)
584 {
585 rtx tmp = op;
586 if (GET_CODE (tmp) == SUBREG)
587 tmp = SUBREG_REG (tmp);
588 if (REG_P (tmp)
589 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
590 {
591 op = reg_equiv_memory_loc[REGNO (tmp)];
592 if (op == 0)
593 return 0;
594 }
595 }
596 return op;
597 }
598
599 /* The set of scalar modes supported differs from the default
600 check-what-c-supports version in that TFmode is sometimes available even
601 when long double indicates only DFmode. On Unicos/Mk, HImode does not map
602 to any C type, but of course we still support it. */
603
604 static bool
605 alpha_scalar_mode_supported_p (enum machine_mode mode)
606 {
607 switch (mode)
608 {
609 case QImode:
610 case HImode:
611 case SImode:
612 case DImode:
613 case TImode: /* via optabs.c */
614 return true;
615
616 case SFmode:
617 case DFmode:
618 return true;
619
620 case TFmode:
621 return TARGET_HAS_XFLOATING_LIBS;
622
623 default:
624 return false;
625 }
626 }
627
628 /* Alpha implements a couple of integer vector mode operations when
629 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
630 which allows the vectorizer to operate on e.g. move instructions,
631 or when expand_vector_operations can do something useful. */
632
633 static bool
634 alpha_vector_mode_supported_p (enum machine_mode mode)
635 {
636 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
637 }
638
639 /* Return 1 if this function can directly return via $26. */
640
641 int
642 direct_return (void)
643 {
644 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
645 && reload_completed
646 && alpha_sa_size () == 0
647 && get_frame_size () == 0
648 && crtl->outgoing_args_size == 0
649 && crtl->args.pretend_args_size == 0);
650 }
651
652 /* Return the ADDR_VEC associated with a tablejump insn. */
653
654 rtx
655 alpha_tablejump_addr_vec (rtx insn)
656 {
657 rtx tmp;
658
659 tmp = JUMP_LABEL (insn);
660 if (!tmp)
661 return NULL_RTX;
662 tmp = NEXT_INSN (tmp);
663 if (!tmp)
664 return NULL_RTX;
665 if (JUMP_P (tmp)
666 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
667 return PATTERN (tmp);
668 return NULL_RTX;
669 }
670
671 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
672
673 rtx
674 alpha_tablejump_best_label (rtx insn)
675 {
676 rtx jump_table = alpha_tablejump_addr_vec (insn);
677 rtx best_label = NULL_RTX;
678
679 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
680 there for edge frequency counts from profile data. */
681
682 if (jump_table)
683 {
684 int n_labels = XVECLEN (jump_table, 1);
685 int best_count = -1;
686 int i, j;
687
688 for (i = 0; i < n_labels; i++)
689 {
690 int count = 1;
691
692 for (j = i + 1; j < n_labels; j++)
693 if (XEXP (XVECEXP (jump_table, 1, i), 0)
694 == XEXP (XVECEXP (jump_table, 1, j), 0))
695 count++;
696
697 if (count > best_count)
698 best_count = count, best_label = XVECEXP (jump_table, 1, i);
699 }
700 }
701
702 return best_label ? best_label : const0_rtx;
703 }
704
705 /* Return the TLS model to use for SYMBOL. */
706
707 static enum tls_model
708 tls_symbolic_operand_type (rtx symbol)
709 {
710 enum tls_model model;
711
712 if (GET_CODE (symbol) != SYMBOL_REF)
713 return TLS_MODEL_NONE;
714 model = SYMBOL_REF_TLS_MODEL (symbol);
715
716 /* Local-exec with a 64-bit size is the same code as initial-exec. */
717 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
718 model = TLS_MODEL_INITIAL_EXEC;
719
720 return model;
721 }
722 \f
723 /* Return true if the function DECL will share the same GP as any
724 function in the current unit of translation. */
725
726 static bool
727 decl_has_samegp (const_tree decl)
728 {
729 /* Functions that are not local can be overridden, and thus may
730 not share the same gp. */
731 if (!(*targetm.binds_local_p) (decl))
732 return false;
733
734 /* If -msmall-data is in effect, assume that there is only one GP
735 for the module, and so any local symbol has this property. We
736 need explicit relocations to be able to enforce this for symbols
737 not defined in this unit of translation, however. */
738 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
739 return true;
740
741 /* Functions that are not external are defined in this UoT. */
742 /* ??? Irritatingly, static functions not yet emitted are still
743 marked "external". Apply this to non-static functions only. */
744 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
745 }
746
747 /* Return true if EXP should be placed in the small data section. */
748
749 static bool
750 alpha_in_small_data_p (const_tree exp)
751 {
752 /* We want to merge strings, so we never consider them small data. */
753 if (TREE_CODE (exp) == STRING_CST)
754 return false;
755
756 /* Functions are never in the small data area. Duh. */
757 if (TREE_CODE (exp) == FUNCTION_DECL)
758 return false;
759
760 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
761 {
762 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
763 if (strcmp (section, ".sdata") == 0
764 || strcmp (section, ".sbss") == 0)
765 return true;
766 }
767 else
768 {
769 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
770
771 /* If this is an incomplete type with size 0, then we can't put it
772 in sdata because it might be too big when completed. */
773 if (size > 0 && size <= g_switch_value)
774 return true;
775 }
776
777 return false;
778 }
779
780 #if TARGET_ABI_OPEN_VMS
781 static bool
782 vms_valid_pointer_mode (enum machine_mode mode)
783 {
784 return (mode == SImode || mode == DImode);
785 }
786
787 static bool
788 alpha_linkage_symbol_p (const char *symname)
789 {
790 int symlen = strlen (symname);
791
792 if (symlen > 4)
793 return strcmp (&symname [symlen - 4], "..lk") == 0;
794
795 return false;
796 }
797
798 #define LINKAGE_SYMBOL_REF_P(X) \
799 ((GET_CODE (X) == SYMBOL_REF \
800 && alpha_linkage_symbol_p (XSTR (X, 0))) \
801 || (GET_CODE (X) == CONST \
802 && GET_CODE (XEXP (X, 0)) == PLUS \
803 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
804 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
805 #endif
806
807 /* legitimate_address_p recognizes an RTL expression that is a valid
808 memory address for an instruction. The MODE argument is the
809 machine mode for the MEM expression that wants to use this address.
810
811 For Alpha, we have either a constant address or the sum of a
812 register and a constant address, or just a register. For DImode,
813 any of those forms can be surrounded with an AND that clears the
814 low-order three bits; this is an "unaligned" access. */
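/* For example, a bare base register such as $16, a base plus a small
   constant such as 8($16), and -- for a DImode ldq_u-style access --
   either of those wrapped in (and ... (const_int -8)) are all accepted
   below. */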
815
816 static bool
817 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
818 {
819 /* If this is an ldq_u type address, discard the outer AND. */
820 if (mode == DImode
821 && GET_CODE (x) == AND
822 && CONST_INT_P (XEXP (x, 1))
823 && INTVAL (XEXP (x, 1)) == -8)
824 x = XEXP (x, 0);
825
826 /* Discard non-paradoxical subregs. */
827 if (GET_CODE (x) == SUBREG
828 && (GET_MODE_SIZE (GET_MODE (x))
829 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
830 x = SUBREG_REG (x);
831
832 /* Unadorned general registers are valid. */
833 if (REG_P (x)
834 && (strict
835 ? STRICT_REG_OK_FOR_BASE_P (x)
836 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
837 return true;
838
839 /* Constant addresses (i.e. +/- 32k) are valid. */
840 if (CONSTANT_ADDRESS_P (x))
841 return true;
842
843 #if TARGET_ABI_OPEN_VMS
844 if (LINKAGE_SYMBOL_REF_P (x))
845 return true;
846 #endif
847
848 /* Register plus a small constant offset is valid. */
849 if (GET_CODE (x) == PLUS)
850 {
851 rtx ofs = XEXP (x, 1);
852 x = XEXP (x, 0);
853
854 /* Discard non-paradoxical subregs. */
855 if (GET_CODE (x) == SUBREG
856 && (GET_MODE_SIZE (GET_MODE (x))
857 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
858 x = SUBREG_REG (x);
859
860 if (REG_P (x))
861 {
862 if (! strict
863 && NONSTRICT_REG_OK_FP_BASE_P (x)
864 && CONST_INT_P (ofs))
865 return true;
866 if ((strict
867 ? STRICT_REG_OK_FOR_BASE_P (x)
868 : NONSTRICT_REG_OK_FOR_BASE_P (x))
869 && CONSTANT_ADDRESS_P (ofs))
870 return true;
871 }
872 }
873
874 /* If we're managing explicit relocations, LO_SUM is valid, as are small
875 data symbols. Avoid explicit relocations of modes larger than word
876 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
877 else if (TARGET_EXPLICIT_RELOCS
878 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
879 {
880 if (small_symbolic_operand (x, Pmode))
881 return true;
882
883 if (GET_CODE (x) == LO_SUM)
884 {
885 rtx ofs = XEXP (x, 1);
886 x = XEXP (x, 0);
887
888 /* Discard non-paradoxical subregs. */
889 if (GET_CODE (x) == SUBREG
890 && (GET_MODE_SIZE (GET_MODE (x))
891 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
892 x = SUBREG_REG (x);
893
894 /* Must have a valid base register. */
895 if (! (REG_P (x)
896 && (strict
897 ? STRICT_REG_OK_FOR_BASE_P (x)
898 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
899 return false;
900
901 /* The symbol must be local. */
902 if (local_symbolic_operand (ofs, Pmode)
903 || dtp32_symbolic_operand (ofs, Pmode)
904 || tp32_symbolic_operand (ofs, Pmode))
905 return true;
906 }
907 }
908
909 return false;
910 }
911
912 /* Build the SYMBOL_REF for __tls_get_addr. */
913
914 static GTY(()) rtx tls_get_addr_libfunc;
915
916 static rtx
917 get_tls_get_addr (void)
918 {
919 if (!tls_get_addr_libfunc)
920 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
921 return tls_get_addr_libfunc;
922 }
923
924 /* Try machine-dependent ways of modifying an illegitimate address
925 to be legitimate. If we find one, return the new, valid address. */
926
927 static rtx
928 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
929 {
930 HOST_WIDE_INT addend;
931
932 /* If the address is (plus reg const_int) and the CONST_INT is not a
933 valid offset, compute the high part of the constant and add it to
934 the register. Then our address is (plus temp low-part-const). */
935 if (GET_CODE (x) == PLUS
936 && REG_P (XEXP (x, 0))
937 && CONST_INT_P (XEXP (x, 1))
938 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
939 {
940 addend = INTVAL (XEXP (x, 1));
941 x = XEXP (x, 0);
942 goto split_addend;
943 }
944
945 /* If the address is (const (plus FOO const_int)), find the low-order
946 part of the CONST_INT. Then load FOO plus any high-order part of the
947 CONST_INT into a register. Our address is (plus reg low-part-const).
948 This is done to reduce the number of GOT entries. */
949 if (can_create_pseudo_p ()
950 && GET_CODE (x) == CONST
951 && GET_CODE (XEXP (x, 0)) == PLUS
952 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
953 {
954 addend = INTVAL (XEXP (XEXP (x, 0), 1));
955 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
956 goto split_addend;
957 }
958
959 /* If we have a (plus reg const), emit the load as in (2), then add
960 the two registers, and finally generate (plus reg low-part-const) as
961 our address. */
962 if (can_create_pseudo_p ()
963 && GET_CODE (x) == PLUS
964 && REG_P (XEXP (x, 0))
965 && GET_CODE (XEXP (x, 1)) == CONST
966 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
967 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
968 {
969 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
970 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
971 XEXP (XEXP (XEXP (x, 1), 0), 0),
972 NULL_RTX, 1, OPTAB_LIB_WIDEN);
973 goto split_addend;
974 }
975
976 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
977 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
978 around +/- 32k offset. */
979 if (TARGET_EXPLICIT_RELOCS
980 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
981 && symbolic_operand (x, Pmode))
982 {
983 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
984
985 switch (tls_symbolic_operand_type (x))
986 {
987 case TLS_MODEL_NONE:
988 break;
989
990 case TLS_MODEL_GLOBAL_DYNAMIC:
991 start_sequence ();
992
993 r0 = gen_rtx_REG (Pmode, 0);
994 r16 = gen_rtx_REG (Pmode, 16);
995 tga = get_tls_get_addr ();
996 dest = gen_reg_rtx (Pmode);
997 seq = GEN_INT (alpha_next_sequence_number++);
998
999 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
1000 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
1001 insn = emit_call_insn (insn);
1002 RTL_CONST_CALL_P (insn) = 1;
1003 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1004
1005 insn = get_insns ();
1006 end_sequence ();
1007
1008 emit_libcall_block (insn, dest, r0, x);
1009 return dest;
1010
1011 case TLS_MODEL_LOCAL_DYNAMIC:
1012 start_sequence ();
1013
1014 r0 = gen_rtx_REG (Pmode, 0);
1015 r16 = gen_rtx_REG (Pmode, 16);
1016 tga = get_tls_get_addr ();
1017 scratch = gen_reg_rtx (Pmode);
1018 seq = GEN_INT (alpha_next_sequence_number++);
1019
1020 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1021 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1022 insn = emit_call_insn (insn);
1023 RTL_CONST_CALL_P (insn) = 1;
1024 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1025
1026 insn = get_insns ();
1027 end_sequence ();
1028
1029 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1030 UNSPEC_TLSLDM_CALL);
1031 emit_libcall_block (insn, scratch, r0, eqv);
1032
1033 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1034 eqv = gen_rtx_CONST (Pmode, eqv);
1035
1036 if (alpha_tls_size == 64)
1037 {
1038 dest = gen_reg_rtx (Pmode);
1039 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1040 emit_insn (gen_adddi3 (dest, dest, scratch));
1041 return dest;
1042 }
1043 if (alpha_tls_size == 32)
1044 {
1045 insn = gen_rtx_HIGH (Pmode, eqv);
1046 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1047 scratch = gen_reg_rtx (Pmode);
1048 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1049 }
1050 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1051
1052 case TLS_MODEL_INITIAL_EXEC:
1053 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1054 eqv = gen_rtx_CONST (Pmode, eqv);
1055 tp = gen_reg_rtx (Pmode);
1056 scratch = gen_reg_rtx (Pmode);
1057 dest = gen_reg_rtx (Pmode);
1058
1059 emit_insn (gen_load_tp (tp));
1060 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1061 emit_insn (gen_adddi3 (dest, tp, scratch));
1062 return dest;
1063
1064 case TLS_MODEL_LOCAL_EXEC:
1065 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1066 eqv = gen_rtx_CONST (Pmode, eqv);
1067 tp = gen_reg_rtx (Pmode);
1068
1069 emit_insn (gen_load_tp (tp));
1070 if (alpha_tls_size == 32)
1071 {
1072 insn = gen_rtx_HIGH (Pmode, eqv);
1073 insn = gen_rtx_PLUS (Pmode, tp, insn);
1074 tp = gen_reg_rtx (Pmode);
1075 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1076 }
1077 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1078
1079 default:
1080 gcc_unreachable ();
1081 }
1082
1083 if (local_symbolic_operand (x, Pmode))
1084 {
1085 if (small_symbolic_operand (x, Pmode))
1086 return x;
1087 else
1088 {
1089 if (can_create_pseudo_p ())
1090 scratch = gen_reg_rtx (Pmode);
1091 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1092 gen_rtx_HIGH (Pmode, x)));
1093 return gen_rtx_LO_SUM (Pmode, scratch, x);
1094 }
1095 }
1096 }
1097
1098 return NULL;
1099
1100 split_addend:
1101 {
1102 HOST_WIDE_INT low, high;
1103
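    /* The low part is sign-extended so that it fits lda's signed 16-bit
       displacement; e.g. an addend of 0x12349876 splits into low = -0x678a
       and high = 0x12350000, leaving no residual addend. */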
1104 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1105 addend -= low;
1106 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1107 addend -= high;
1108
1109 if (addend)
1110 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1111 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1112 1, OPTAB_LIB_WIDEN);
1113 if (high)
1114 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1115 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1116 1, OPTAB_LIB_WIDEN);
1117
1118 return plus_constant (x, low);
1119 }
1120 }
1121
1122
1123 /* Try machine-dependent ways of modifying an illegitimate address
1124 to be legitimate. Return X or the new, valid address. */
1125
1126 static rtx
1127 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1128 enum machine_mode mode)
1129 {
1130 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1131 return new_x ? new_x : x;
1132 }
1133
1134 /* Primarily this is required for TLS symbols, but given that our move
1135 patterns *ought* to be able to handle any symbol at any time, we
1136 should never be spilling symbolic operands to the constant pool, ever. */
1137
1138 static bool
1139 alpha_cannot_force_const_mem (rtx x)
1140 {
1141 enum rtx_code code = GET_CODE (x);
1142 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1143 }
1144
1145 /* We do not allow indirect calls to be optimized into sibling calls, nor
1146 can we allow a call to a function with a different GP to be optimized
1147 into a sibcall. */
1148
1149 static bool
1150 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1151 {
1152 /* Can't do indirect tail calls, since we don't know if the target
1153 uses the same GP. */
1154 if (!decl)
1155 return false;
1156
1157 /* Otherwise, we can make a tail call if the target function shares
1158 the same GP. */
1159 return decl_has_samegp (decl);
1160 }
1161
1162 int
1163 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1164 {
1165 rtx x = *px;
1166
1167 /* Don't re-split. */
1168 if (GET_CODE (x) == LO_SUM)
1169 return -1;
1170
1171 return small_symbolic_operand (x, Pmode) != 0;
1172 }
1173
1174 static int
1175 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1176 {
1177 rtx x = *px;
1178
1179 /* Don't re-split. */
1180 if (GET_CODE (x) == LO_SUM)
1181 return -1;
1182
1183 if (small_symbolic_operand (x, Pmode))
1184 {
1185 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1186 *px = x;
1187 return -1;
1188 }
1189
1190 return 0;
1191 }
1192
1193 rtx
1194 split_small_symbolic_operand (rtx x)
1195 {
1196 x = copy_insn (x);
1197 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1198 return x;
1199 }
1200
1201 /* Indicate that INSN cannot be duplicated. This is true for any insn
1202 that we've marked with gpdisp relocs, since those have to stay in
1203 1-1 correspondence with one another.
1204
1205 Technically we could copy them if we could set up a mapping from one
1206 sequence number to another, across the set of insns to be duplicated.
1207 This seems overly complicated and error-prone since interblock motion
1208 from sched-ebb could move one of the pair of insns to a different block.
1209
1210 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1211 then they'll be in a different block from their ldgp. Which could lead
1212 the bb reorder code to think that it would be ok to copy just the block
1213 containing the call and branch to the block containing the ldgp. */
1214
1215 static bool
1216 alpha_cannot_copy_insn_p (rtx insn)
1217 {
1218 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1219 return false;
1220 if (recog_memoized (insn) >= 0)
1221 return get_attr_cannot_copy (insn);
1222 else
1223 return false;
1224 }
1225
1226
1227 /* Try a machine-dependent way of reloading an illegitimate address
1228 operand. If we find one, push the reload and return the new rtx. */
1229
1230 rtx
1231 alpha_legitimize_reload_address (rtx x,
1232 enum machine_mode mode ATTRIBUTE_UNUSED,
1233 int opnum, int type,
1234 int ind_levels ATTRIBUTE_UNUSED)
1235 {
1236 /* We must recognize output that we have already generated ourselves. */
1237 if (GET_CODE (x) == PLUS
1238 && GET_CODE (XEXP (x, 0)) == PLUS
1239 && REG_P (XEXP (XEXP (x, 0), 0))
1240 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1241 && CONST_INT_P (XEXP (x, 1)))
1242 {
1243 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1244 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1245 opnum, (enum reload_type) type);
1246 return x;
1247 }
1248
1249 /* We wish to handle large displacements off a base register by
1250 splitting the addend across an ldah and the mem insn. This
1251 cuts the number of extra insns needed from 3 to 1. */
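  /* For example, a displacement of 40000 is reloaded as high = 65536
     (a single ldah of 1 off the base register), with the remaining low
     part of -25536 left in the mem's displacement field. */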
1252 if (GET_CODE (x) == PLUS
1253 && REG_P (XEXP (x, 0))
1254 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1255 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1256 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1257 {
1258 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1259 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1260 HOST_WIDE_INT high
1261 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1262
1263 /* Check for 32-bit overflow. */
1264 if (high + low != val)
1265 return NULL_RTX;
1266
1267 /* Reload the high part into a base reg; leave the low part
1268 in the mem directly. */
1269 x = gen_rtx_PLUS (GET_MODE (x),
1270 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1271 GEN_INT (high)),
1272 GEN_INT (low));
1273
1274 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1275 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1276 opnum, (enum reload_type) type);
1277 return x;
1278 }
1279
1280 return NULL_RTX;
1281 }
1282 \f
1283 /* Compute a (partial) cost for rtx X. Return true if the complete
1284 cost has been computed, and false if subexpressions should be
1285 scanned. In either case, *TOTAL contains the cost result. */
1286
1287 static bool
1288 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1289 bool speed)
1290 {
1291 enum machine_mode mode = GET_MODE (x);
1292 bool float_mode_p = FLOAT_MODE_P (mode);
1293 const struct alpha_rtx_cost_data *cost_data;
1294
1295 if (!speed)
1296 cost_data = &alpha_rtx_cost_size;
1297 else
1298 cost_data = &alpha_rtx_cost_data[alpha_tune];
1299
1300 switch (code)
1301 {
1302 case CONST_INT:
1303 /* If this is an 8-bit constant, return zero since it can be used
1304 nearly anywhere with no cost. If it is a valid operand for an
1305 ADD or AND, likewise return 0 if we know it will be used in that
1306 context. Otherwise, return 2 since it might be used there later.
1307 All other constants take at least two insns. */
1308 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1309 {
1310 *total = 0;
1311 return true;
1312 }
1313 /* FALLTHRU */
1314
1315 case CONST_DOUBLE:
1316 if (x == CONST0_RTX (mode))
1317 *total = 0;
1318 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1319 || (outer_code == AND && and_operand (x, VOIDmode)))
1320 *total = 0;
1321 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1322 *total = 2;
1323 else
1324 *total = COSTS_N_INSNS (2);
1325 return true;
1326
1327 case CONST:
1328 case SYMBOL_REF:
1329 case LABEL_REF:
1330 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1331 *total = COSTS_N_INSNS (outer_code != MEM);
1332 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1333 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1334 else if (tls_symbolic_operand_type (x))
1335 /* Estimate of cost for call_pal rduniq. */
1336 /* ??? How many insns do we emit here? More than one... */
1337 *total = COSTS_N_INSNS (15);
1338 else
1339 /* Otherwise we do a load from the GOT. */
1340 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1341 return true;
1342
1343 case HIGH:
1344 /* This is effectively an add_operand. */
1345 *total = 2;
1346 return true;
1347
1348 case PLUS:
1349 case MINUS:
1350 if (float_mode_p)
1351 *total = cost_data->fp_add;
1352 else if (GET_CODE (XEXP (x, 0)) == MULT
1353 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1354 {
1355 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1356 (enum rtx_code) outer_code, speed)
1357 + rtx_cost (XEXP (x, 1),
1358 (enum rtx_code) outer_code, speed)
1359 + COSTS_N_INSNS (1));
1360 return true;
1361 }
1362 return false;
1363
1364 case MULT:
1365 if (float_mode_p)
1366 *total = cost_data->fp_mult;
1367 else if (mode == DImode)
1368 *total = cost_data->int_mult_di;
1369 else
1370 *total = cost_data->int_mult_si;
1371 return false;
1372
1373 case ASHIFT:
1374 if (CONST_INT_P (XEXP (x, 1))
1375 && INTVAL (XEXP (x, 1)) <= 3)
1376 {
1377 *total = COSTS_N_INSNS (1);
1378 return false;
1379 }
1380 /* FALLTHRU */
1381
1382 case ASHIFTRT:
1383 case LSHIFTRT:
1384 *total = cost_data->int_shift;
1385 return false;
1386
1387 case IF_THEN_ELSE:
1388 if (float_mode_p)
1389 *total = cost_data->fp_add;
1390 else
1391 *total = cost_data->int_cmov;
1392 return false;
1393
1394 case DIV:
1395 case UDIV:
1396 case MOD:
1397 case UMOD:
1398 if (!float_mode_p)
1399 *total = cost_data->int_div;
1400 else if (mode == SFmode)
1401 *total = cost_data->fp_div_sf;
1402 else
1403 *total = cost_data->fp_div_df;
1404 return false;
1405
1406 case MEM:
1407 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1408 return true;
1409
1410 case NEG:
1411 if (! float_mode_p)
1412 {
1413 *total = COSTS_N_INSNS (1);
1414 return false;
1415 }
1416 /* FALLTHRU */
1417
1418 case ABS:
1419 if (! float_mode_p)
1420 {
1421 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1422 return false;
1423 }
1424 /* FALLTHRU */
1425
1426 case FLOAT:
1427 case UNSIGNED_FLOAT:
1428 case FIX:
1429 case UNSIGNED_FIX:
1430 case FLOAT_TRUNCATE:
1431 *total = cost_data->fp_add;
1432 return false;
1433
1434 case FLOAT_EXTEND:
1435 if (MEM_P (XEXP (x, 0)))
1436 *total = 0;
1437 else
1438 *total = cost_data->fp_add;
1439 return false;
1440
1441 default:
1442 return false;
1443 }
1444 }
1445 \f
1446 /* REF is an alignable memory location. Place an aligned SImode
1447 reference into *PALIGNED_MEM and the number of bits to shift into
1448 *PBITNUM. */
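/* For example, a little-endian HImode reference at byte offset 6 from an
   aligned base yields the SImode word at offset 4 and *PBITNUM = 16. */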
1450
1451 void
1452 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1453 {
1454 rtx base;
1455 HOST_WIDE_INT disp, offset;
1456
1457 gcc_assert (MEM_P (ref));
1458
1459 if (reload_in_progress
1460 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1461 {
1462 base = find_replacement (&XEXP (ref, 0));
1463 gcc_assert (memory_address_p (GET_MODE (ref), base));
1464 }
1465 else
1466 base = XEXP (ref, 0);
1467
1468 if (GET_CODE (base) == PLUS)
1469 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1470 else
1471 disp = 0;
1472
1473 /* Find the byte offset within an aligned word. If the memory itself is
1474 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1475 will have examined the base register and determined it is aligned, and
1476 thus displacements from it are naturally alignable. */
1477 if (MEM_ALIGN (ref) >= 32)
1478 offset = 0;
1479 else
1480 offset = disp & 3;
1481
1482 /* The location should not cross aligned word boundary. */
1483 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1484 <= GET_MODE_SIZE (SImode));
1485
1486 /* Access the entire aligned word. */
1487 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1488
1489 /* Convert the byte offset within the word to a bit offset. */
1490 if (WORDS_BIG_ENDIAN)
1491 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1492 else
1493 offset *= 8;
1494 *pbitnum = GEN_INT (offset);
1495 }
1496
1497 /* Similar to get_aligned_mem, but just get the address. Handle the
1498 two reload cases. */
1499
1500 rtx
1501 get_unaligned_address (rtx ref)
1502 {
1503 rtx base;
1504 HOST_WIDE_INT offset = 0;
1505
1506 gcc_assert (MEM_P (ref));
1507
1508 if (reload_in_progress
1509 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1510 {
1511 base = find_replacement (&XEXP (ref, 0));
1512
1513 gcc_assert (memory_address_p (GET_MODE (ref), base));
1514 }
1515 else
1516 base = XEXP (ref, 0);
1517
1518 if (GET_CODE (base) == PLUS)
1519 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1520
1521 return plus_constant (base, offset);
1522 }
1523
1524 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1525 X is always returned in a register. */
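/* E.g. for ADDR = (plus $16 5) and OFS = 2, the result is a register
   holding $16 + 7, which has the same low three bits as $16 + 5 + 2. */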
1526
1527 rtx
1528 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1529 {
1530 if (GET_CODE (addr) == PLUS)
1531 {
1532 ofs += INTVAL (XEXP (addr, 1));
1533 addr = XEXP (addr, 0);
1534 }
1535
1536 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1537 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1538 }
1539
1540 /* On the Alpha, all (non-symbolic) constants except zero go into
1541 a floating-point register via memory. Note that we cannot
1542 return anything that is not a subset of RCLASS, and that some
1543 symbolic constants cannot be dropped to memory. */
1544
1545 enum reg_class
1546 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1547 {
1548 /* Zero is present in any register class. */
1549 if (x == CONST0_RTX (GET_MODE (x)))
1550 return rclass;
1551
1552 /* These sorts of constants we can easily drop to memory. */
1553 if (CONST_INT_P (x)
1554 || GET_CODE (x) == CONST_DOUBLE
1555 || GET_CODE (x) == CONST_VECTOR)
1556 {
1557 if (rclass == FLOAT_REGS)
1558 return NO_REGS;
1559 if (rclass == ALL_REGS)
1560 return GENERAL_REGS;
1561 return rclass;
1562 }
1563
1564 /* All other kinds of constants should not (and in the case of HIGH
1565 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1566 secondary reload. */
1567 if (CONSTANT_P (x))
1568 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1569
1570 return rclass;
1571 }
1572
1573 /* Inform reload about cases where moving X with a mode MODE to a register in
1574 RCLASS requires an extra scratch or immediate register. Return the class
1575 needed for the immediate register. */
1576
1577 static reg_class_t
1578 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1579 enum machine_mode mode, secondary_reload_info *sri)
1580 {
1581 enum reg_class rclass = (enum reg_class) rclass_i;
1582
1583 /* Loading and storing HImode or QImode values to and from memory
1584 usually requires a scratch register. */
1585 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1586 {
1587 if (any_memory_operand (x, mode))
1588 {
1589 if (in_p)
1590 {
1591 if (!aligned_memory_operand (x, mode))
1592 sri->icode = direct_optab_handler (reload_in_optab, mode);
1593 }
1594 else
1595 sri->icode = direct_optab_handler (reload_out_optab, mode);
1596 return NO_REGS;
1597 }
1598 }
1599
1600 /* We also cannot do integral arithmetic into FP regs, as might result
1601 from register elimination into a DImode fp register. */
1602 if (rclass == FLOAT_REGS)
1603 {
1604 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1605 return GENERAL_REGS;
1606 if (in_p && INTEGRAL_MODE_P (mode)
1607 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1608 return GENERAL_REGS;
1609 }
1610
1611 return NO_REGS;
1612 }
1613 \f
1614 /* Subfunction of the following function. Update the flags of any MEM
1615 found in part of X. */
1616
1617 static int
1618 alpha_set_memflags_1 (rtx *xp, void *data)
1619 {
1620 rtx x = *xp, orig = (rtx) data;
1621
1622 if (!MEM_P (x))
1623 return 0;
1624
1625 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1626 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1627 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1628 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1629 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1630
1631 /* Sadly, we cannot use alias sets because the extra aliasing
1632 produced by the AND interferes. Given that two-byte quantities
1633 are the only thing we would be able to differentiate anyway,
1634 there does not seem to be any point in convoluting the early
1635 out of the alias check. */
1636
1637 return -1;
1638 }
1639
1640 /* Given SEQ, which is an INSN list, look for any MEMs in either
1641 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1642 no-trap, and readonly flags from REF into each of the MEMs found. If REF is not
1643 a MEM, don't do anything. */
1644
1645 void
1646 alpha_set_memflags (rtx seq, rtx ref)
1647 {
1648 rtx insn;
1649
1650 if (!MEM_P (ref))
1651 return;
1652
1653 /* This is only called from alpha.md, after having had something
1654 generated from one of the insn patterns. So if everything is
1655 zero, the pattern is already up-to-date. */
1656 if (!MEM_VOLATILE_P (ref)
1657 && !MEM_IN_STRUCT_P (ref)
1658 && !MEM_SCALAR_P (ref)
1659 && !MEM_NOTRAP_P (ref)
1660 && !MEM_READONLY_P (ref))
1661 return;
1662
1663 for (insn = seq; insn; insn = NEXT_INSN (insn))
1664 if (INSN_P (insn))
1665 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1666 else
1667 gcc_unreachable ();
1668 }
1669 \f
1670 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1671 int, bool);
1672
1673 /* Internal routine for alpha_emit_set_const to check for N or fewer insns.
1674 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1675 and return pc_rtx if successful. */
1676
1677 static rtx
1678 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1679 HOST_WIDE_INT c, int n, bool no_output)
1680 {
1681 HOST_WIDE_INT new_const;
1682 int i, bits;
1683 /* Use a pseudo if highly optimizing and still generating RTL. */
1684 rtx subtarget
1685 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1686 rtx temp, insn;
1687
1688 /* If this is a sign-extended 32-bit constant, we can do this in at most
1689 three insns, so do it if we have enough insns left. We always have
1690 a sign-extended 32-bit constant when compiling on a narrow machine. */
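  /* For instance, c = 0x12348000 splits into high = 0x1235 and low = -0x8000,
     which the two-insn case below emits as an ldah/lda pair. */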
1691
1692 if (HOST_BITS_PER_WIDE_INT != 64
1693 || c >> 31 == -1 || c >> 31 == 0)
1694 {
1695 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1696 HOST_WIDE_INT tmp1 = c - low;
1697 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1698 HOST_WIDE_INT extra = 0;
1699
1700 /* If HIGH will be interpreted as negative but the constant is
1701 positive, we must adjust it to do two ldah insns. */
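      /* E.g. c = 0x7fff8000 would naively need high = -0x8000; with the
         adjustment it becomes extra = 0x4000, high = 0x4000, low = -0x8000,
         i.e. two ldah insns followed by an lda. */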
1702
1703 if ((high & 0x8000) != 0 && c >= 0)
1704 {
1705 extra = 0x4000;
1706 tmp1 -= 0x40000000;
1707 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1708 }
1709
1710 if (c == low || (low == 0 && extra == 0))
1711 {
1712 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1713 but that meant that we can't handle INT_MIN on 32-bit machines
1714 (like NT/Alpha), because we recurse indefinitely through
1715 emit_move_insn to gen_movdi. So instead, since we know exactly
1716 what we want, create it explicitly. */
1717
1718 if (no_output)
1719 return pc_rtx;
1720 if (target == NULL)
1721 target = gen_reg_rtx (mode);
1722 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1723 return target;
1724 }
1725 else if (n >= 2 + (extra != 0))
1726 {
1727 if (no_output)
1728 return pc_rtx;
1729 if (!can_create_pseudo_p ())
1730 {
1731 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1732 temp = target;
1733 }
1734 else
1735 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1736 subtarget, mode);
1737
1738 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1739 This means that if we go through expand_binop, we'll try to
1740 generate extensions, etc, which will require new pseudos, which
1741 will fail during some split phases. The SImode add patterns
1742 still exist, but are not named. So build the insns by hand. */
1743
1744 if (extra != 0)
1745 {
1746 if (! subtarget)
1747 subtarget = gen_reg_rtx (mode);
1748 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1749 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1750 emit_insn (insn);
1751 temp = subtarget;
1752 }
1753
1754 if (target == NULL)
1755 target = gen_reg_rtx (mode);
1756 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1757 insn = gen_rtx_SET (VOIDmode, target, insn);
1758 emit_insn (insn);
1759 return target;
1760 }
1761 }
1762
1763 /* If we couldn't do it that way, try some other methods. But if we have
1764 no instructions left, don't bother. Likewise, if this is SImode and
1765 we can't make pseudos, we can't do anything since the expand_binop
1766 and expand_unop calls will widen and try to make pseudos. */
1767
1768 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1769 return 0;
1770
1771 /* Next, see if we can load a related constant and then shift and possibly
1772 negate it to get the constant we want. Try this once each increasing
1773 numbers of insns. */
1774
1775 for (i = 1; i < n; i++)
1776 {
1777 /* First, see if, after subtracting some low-order bits, we have an
1778 easy load of the high bits. */
1779
1780 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1781 if (new_const != 0)
1782 {
1783 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1784 if (temp)
1785 {
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1789 target, 0, OPTAB_WIDEN);
1790 }
1791 }
1792
1793 /* Next try complementing. */
1794 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1795 if (temp)
1796 {
1797 if (no_output)
1798 return temp;
1799 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1800 }
1801
1802 /* Next try to form a constant and do a left shift. We can do this
1803 if some low-order bits are zero; the exact_log2 call below tells
1804 us that information. The bits we are shifting out could be any
1805 value, but here we'll just try the 0- and sign-extended forms of
1806 the constant. To try to increase the chance of having the same
1807 constant in more than one insn, start at the highest number of
1808 bits to shift, but try all possibilities in case a ZAPNOT will
1809 be useful. */
1810
1811 bits = exact_log2 (c & -c);
1812 if (bits > 0)
1813 for (; bits > 0; bits--)
1814 {
1815 new_const = c >> bits;
1816 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1817 if (!temp && c < 0)
1818 {
1819 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1820 temp = alpha_emit_set_const (subtarget, mode, new_const,
1821 i, no_output);
1822 }
1823 if (temp)
1824 {
1825 if (no_output)
1826 return temp;
1827 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1828 target, 0, OPTAB_WIDEN);
1829 }
1830 }
1831
1832 /* Now try high-order zero bits. Here we try the shifted-in bits as
1833 all zero and all ones. Be careful to avoid shifting outside the
1834 mode and to avoid shifting outside the host wide int size. */
1835 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1836 confuse the recursive call and set all of the high 32 bits. */
1837
1838 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1839 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1840 if (bits > 0)
1841 for (; bits > 0; bits--)
1842 {
1843 new_const = c << bits;
1844 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1845 if (!temp)
1846 {
1847 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1848 temp = alpha_emit_set_const (subtarget, mode, new_const,
1849 i, no_output);
1850 }
1851 if (temp)
1852 {
1853 if (no_output)
1854 return temp;
1855 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1856 target, 1, OPTAB_WIDEN);
1857 }
1858 }
1859
1860 /* Now try high-order 1 bits. We get that with a sign-extension.
1861 But one bit isn't enough here. Be careful to avoid shifting outside
1862 the mode and to avoid shifting outside the host wide int size. */
1863
1864 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1865 - floor_log2 (~ c) - 2);
1866 if (bits > 0)
1867 for (; bits > 0; bits--)
1868 {
1869 new_const = c << bits;
1870 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1871 if (!temp)
1872 {
1873 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1874 temp = alpha_emit_set_const (subtarget, mode, new_const,
1875 i, no_output);
1876 }
1877 if (temp)
1878 {
1879 if (no_output)
1880 return temp;
1881 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1882 target, 0, OPTAB_WIDEN);
1883 }
1884 }
1885 }
1886
1887 #if HOST_BITS_PER_WIDE_INT == 64
1888 /* Finally, see if we can load a value into the target that is the same as the
1889 constant except that all bytes that are 0 are changed to be 0xff. If we
1890 can, then we can do a ZAPNOT to obtain the desired constant. */
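  /* E.g. for c = 0x0000000000ff00ff we can load -1 with a single lda and
     then ZAPNOT it with a mask keeping only bytes 0 and 2. */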
1891
1892 new_const = c;
1893 for (i = 0; i < 64; i += 8)
1894 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1895 new_const |= (HOST_WIDE_INT) 0xff << i;
1896
1897 /* We are only called for SImode and DImode. If this is SImode, ensure that
1898 we are sign extended to a full word. */
1899
1900 if (mode == SImode)
1901 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1902
1903 if (new_const != c)
1904 {
1905 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1906 if (temp)
1907 {
1908 if (no_output)
1909 return temp;
1910 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1911 target, 0, OPTAB_WIDEN);
1912 }
1913 }
1914 #endif
1915
1916 return 0;
1917 }
1918
1919 /* Try to output insns to set TARGET equal to the constant C if it can be
1920 done in N or fewer insns. Do all computations in MODE. Returns the place
1921 where the output has been placed if it can be done and the insns have been
1922 emitted. If it would take more than N insns, zero is returned and no
1923 insns are emitted. */
1924
1925 static rtx
1926 alpha_emit_set_const (rtx target, enum machine_mode mode,
1927 HOST_WIDE_INT c, int n, bool no_output)
1928 {
1929 enum machine_mode orig_mode = mode;
1930 rtx orig_target = target;
1931 rtx result = 0;
1932 int i;
1933
1934 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1935 can't load this constant in one insn, do this in DImode. */
1936 if (!can_create_pseudo_p () && mode == SImode
1937 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1938 {
1939 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1940 if (result)
1941 return result;
1942
1943 target = no_output ? NULL : gen_lowpart (DImode, target);
1944 mode = DImode;
1945 }
1946 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1947 {
1948 target = no_output ? NULL : gen_lowpart (DImode, target);
1949 mode = DImode;
1950 }
1951
1952 /* Try 1 insn, then 2, then up to N. */
1953 for (i = 1; i <= n; i++)
1954 {
1955 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1956 if (result)
1957 {
1958 rtx insn, set;
1959
1960 if (no_output)
1961 return result;
1962
1963 insn = get_last_insn ();
1964 set = single_set (insn);
1965 if (! CONSTANT_P (SET_SRC (set)))
1966 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1967 break;
1968 }
1969 }
1970
1971 /* Allow for the case where we changed the mode of TARGET. */
1972 if (result)
1973 {
1974 if (result == target)
1975 result = orig_target;
1976 else if (mode != orig_mode)
1977 result = gen_lowpart (orig_mode, result);
1978 }
1979
1980 return result;
1981 }
1982
1983 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1984 fall back to a straightforward decomposition. We do this to avoid
1985 exponential run times encountered when looking for longer sequences
1986 with alpha_emit_set_const. */
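/* An added worked example (not from the original sources): for
   c1 = 0x123456789abcdef0 the decomposition below yields
   d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and d4 = 0x12340000,
   and ((d4 + d3) << 32) + d2 + d1 reassembles the original value.  */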
1987
1988 static rtx
1989 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1990 {
1991 HOST_WIDE_INT d1, d2, d3, d4;
1992
1993 /* Decompose the entire word */
1994 #if HOST_BITS_PER_WIDE_INT >= 64
1995 gcc_assert (c2 == -(c1 < 0));
1996 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1997 c1 -= d1;
1998 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1999 c1 = (c1 - d2) >> 32;
2000 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2001 c1 -= d3;
2002 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2003 gcc_assert (c1 == d4);
2004 #else
2005 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
2006 c1 -= d1;
2007 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2008 gcc_assert (c1 == d2);
2009 c2 += (d2 < 0);
2010 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2011 c2 -= d3;
2012 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2013 gcc_assert (c2 == d4);
2014 #endif
2015
2016 /* Construct the high word */
2017 if (d4)
2018 {
2019 emit_move_insn (target, GEN_INT (d4));
2020 if (d3)
2021 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2022 }
2023 else
2024 emit_move_insn (target, GEN_INT (d3));
2025
2026 /* Shift it into place */
2027 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2028
2029 /* Add in the low bits. */
2030 if (d2)
2031 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2032 if (d1)
2033 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2034
2035 return target;
2036 }
2037
2038 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, extract
2039 the low 64 bits into *P0 and *P1. */
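/* For example (an added note): (const_int -5) yields *P0 = -5 and
   *P1 = -1, the sign extension of the low word.  */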
2040
2041 static void
2042 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2043 {
2044 HOST_WIDE_INT i0, i1;
2045
2046 if (GET_CODE (x) == CONST_VECTOR)
2047 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2048
2049
2050 if (CONST_INT_P (x))
2051 {
2052 i0 = INTVAL (x);
2053 i1 = -(i0 < 0);
2054 }
2055 else if (HOST_BITS_PER_WIDE_INT >= 64)
2056 {
2057 i0 = CONST_DOUBLE_LOW (x);
2058 i1 = -(i0 < 0);
2059 }
2060 else
2061 {
2062 i0 = CONST_DOUBLE_LOW (x);
2063 i1 = CONST_DOUBLE_HIGH (x);
2064 }
2065
2066 *p0 = i0;
2067 *p1 = i1;
2068 }
2069
2070 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2071 are willing to load the value into a register via a move pattern.
2072 Normally this is all symbolic constants, integral constants that
2073 take three or fewer instructions, and floating-point zero. */
2074
2075 bool
2076 alpha_legitimate_constant_p (rtx x)
2077 {
2078 enum machine_mode mode = GET_MODE (x);
2079 HOST_WIDE_INT i0, i1;
2080
2081 switch (GET_CODE (x))
2082 {
2083 case LABEL_REF:
2084 case HIGH:
2085 return true;
2086
2087 case CONST:
2088 if (GET_CODE (XEXP (x, 0)) == PLUS
2089 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2090 x = XEXP (XEXP (x, 0), 0);
2091 else
2092 return true;
2093
2094 if (GET_CODE (x) != SYMBOL_REF)
2095 return true;
2096
2097 /* FALLTHRU */
2098
2099 case SYMBOL_REF:
2100 /* TLS symbols are never valid. */
2101 return SYMBOL_REF_TLS_MODEL (x) == 0;
2102
2103 case CONST_DOUBLE:
2104 if (x == CONST0_RTX (mode))
2105 return true;
2106 if (FLOAT_MODE_P (mode))
2107 return false;
2108 goto do_integer;
2109
2110 case CONST_VECTOR:
2111 if (x == CONST0_RTX (mode))
2112 return true;
2113 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2114 return false;
2115 if (GET_MODE_SIZE (mode) != 8)
2116 return false;
2117 goto do_integer;
2118
2119 case CONST_INT:
2120 do_integer:
2121 if (TARGET_BUILD_CONSTANTS)
2122 return true;
2123 alpha_extract_integer (x, &i0, &i1);
2124 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2125 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2126 return false;
2127
2128 default:
2129 return false;
2130 }
2131 }
2132
2133 /* Operand 1 is known to be a constant, and should require more than one
2134 instruction to load. Emit that multi-part load. */
2135
2136 bool
2137 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2138 {
2139 HOST_WIDE_INT i0, i1;
2140 rtx temp = NULL_RTX;
2141
2142 alpha_extract_integer (operands[1], &i0, &i1);
2143
2144 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2145 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2146
2147 if (!temp && TARGET_BUILD_CONSTANTS)
2148 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2149
2150 if (temp)
2151 {
2152 if (!rtx_equal_p (operands[0], temp))
2153 emit_move_insn (operands[0], temp);
2154 return true;
2155 }
2156
2157 return false;
2158 }
2159
2160 /* Expand a move instruction; return true if all work is done.
2161 We don't handle non-bwx subword loads here. */
2162
2163 bool
2164 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2165 {
2166 rtx tmp;
2167
2168 /* If the output is not a register, the input must be. */
2169 if (MEM_P (operands[0])
2170 && ! reg_or_0_operand (operands[1], mode))
2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Allow legitimize_address to perform some simplifications. */
2174 if (mode == Pmode && symbolic_operand (operands[1], mode))
2175 {
2176 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2177 if (tmp)
2178 {
2179 if (tmp == operands[0])
2180 return true;
2181 operands[1] = tmp;
2182 return false;
2183 }
2184 }
2185
2186 /* Early out for non-constants and valid constants. */
2187 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2188 return false;
2189
2190 /* Split large integers. */
2191 if (CONST_INT_P (operands[1])
2192 || GET_CODE (operands[1]) == CONST_DOUBLE
2193 || GET_CODE (operands[1]) == CONST_VECTOR)
2194 {
2195 if (alpha_split_const_mov (mode, operands))
2196 return true;
2197 }
2198
2199 /* Otherwise we've nothing left but to drop the thing to memory. */
2200 tmp = force_const_mem (mode, operands[1]);
2201
2202 if (tmp == NULL_RTX)
2203 return false;
2204
2205 if (reload_in_progress)
2206 {
2207 emit_move_insn (operands[0], XEXP (tmp, 0));
2208 operands[1] = replace_equiv_address (tmp, operands[0]);
2209 }
2210 else
2211 operands[1] = validize_mem (tmp);
2212 return false;
2213 }
2214
2215 /* Expand a non-bwx QImode or HImode move instruction;
2216 return true if all work is done. */
2217
2218 bool
2219 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2220 {
2221 rtx seq;
2222
2223 /* If the output is not a register, the input must be. */
2224 if (MEM_P (operands[0]))
2225 operands[1] = force_reg (mode, operands[1]);
2226
2227 /* Handle four memory cases, unaligned and aligned for either the input
2228 or the output. The only case where we can be called during reload is
2229 for aligned loads; all other cases require temporaries. */
2230
2231 if (any_memory_operand (operands[1], mode))
2232 {
2233 if (aligned_memory_operand (operands[1], mode))
2234 {
2235 if (reload_in_progress)
2236 {
2237 if (mode == QImode)
2238 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2239 else
2240 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2241 emit_insn (seq);
2242 }
2243 else
2244 {
2245 rtx aligned_mem, bitnum;
2246 rtx scratch = gen_reg_rtx (SImode);
2247 rtx subtarget;
2248 bool copyout;
2249
2250 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2251
2252 subtarget = operands[0];
2253 if (REG_P (subtarget))
2254 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2255 else
2256 subtarget = gen_reg_rtx (DImode), copyout = true;
2257
2258 if (mode == QImode)
2259 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2260 bitnum, scratch);
2261 else
2262 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2263 bitnum, scratch);
2264 emit_insn (seq);
2265
2266 if (copyout)
2267 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2268 }
2269 }
2270 else
2271 {
2272 /* Don't pass these as parameters since that makes the generated
2273 code depend on parameter evaluation order which will cause
2274 bootstrap failures. */
2275
2276 rtx temp1, temp2, subtarget, ua;
2277 bool copyout;
2278
2279 temp1 = gen_reg_rtx (DImode);
2280 temp2 = gen_reg_rtx (DImode);
2281
2282 subtarget = operands[0];
2283 if (REG_P (subtarget))
2284 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2285 else
2286 subtarget = gen_reg_rtx (DImode), copyout = true;
2287
2288 ua = get_unaligned_address (operands[1]);
2289 if (mode == QImode)
2290 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2291 else
2292 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2293
2294 alpha_set_memflags (seq, operands[1]);
2295 emit_insn (seq);
2296
2297 if (copyout)
2298 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2299 }
2300 return true;
2301 }
2302
2303 if (any_memory_operand (operands[0], mode))
2304 {
2305 if (aligned_memory_operand (operands[0], mode))
2306 {
2307 rtx aligned_mem, bitnum;
2308 rtx temp1 = gen_reg_rtx (SImode);
2309 rtx temp2 = gen_reg_rtx (SImode);
2310
2311 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2312
2313 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2314 temp1, temp2));
2315 }
2316 else
2317 {
2318 rtx temp1 = gen_reg_rtx (DImode);
2319 rtx temp2 = gen_reg_rtx (DImode);
2320 rtx temp3 = gen_reg_rtx (DImode);
2321 rtx ua = get_unaligned_address (operands[0]);
2322
2323 if (mode == QImode)
2324 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2325 else
2326 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2327
2328 alpha_set_memflags (seq, operands[0]);
2329 emit_insn (seq);
2330 }
2331 return true;
2332 }
2333
2334 return false;
2335 }
2336
2337 /* Implement the movmisalign patterns. One of the operands is a memory
2338 that is not naturally aligned. Emit instructions to load it. */
2339
2340 void
2341 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2342 {
2343 /* Honor misaligned loads, since we promised to handle them. */
2344 if (MEM_P (operands[1]))
2345 {
2346 rtx tmp;
2347
2348 if (register_operand (operands[0], mode))
2349 tmp = operands[0];
2350 else
2351 tmp = gen_reg_rtx (mode);
2352
2353 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2354 if (tmp != operands[0])
2355 emit_move_insn (operands[0], tmp);
2356 }
2357 else if (MEM_P (operands[0]))
2358 {
2359 if (!reg_or_0_operand (operands[1], mode))
2360 operands[1] = force_reg (mode, operands[1]);
2361 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2362 }
2363 else
2364 gcc_unreachable ();
2365 }
2366
2367 /* Generate an unsigned DImode to FP conversion. This is the same code
2368 optabs would emit if we didn't have TFmode patterns.
2369
2370 For SFmode, this is the only construction I've found that can pass
2371 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2372 intermediates will work, because you'll get intermediate rounding
2373 that ruins the end result. Some of this could be fixed by turning
2374 on round-to-positive-infinity, but that requires diddling the fpsr,
2375 which kills performance. I tried turning this around and converting
2376 to a negative number, so that I could turn on /m, but either I did
2377 it wrong or there's something else, because I wound up with the exact
2378 same single-bit error. There is a branch-less form of this same code:
2379
2380 srl $16,1,$1
2381 and $16,1,$2
2382 cmplt $16,0,$3
2383 or $1,$2,$2
2384 cmovge $16,$16,$2
2385 itoft $3,$f10
2386 itoft $2,$f11
2387 cvtqs $f11,$f11
2388 adds $f11,$f11,$f0
2389 fcmoveq $f10,$f11,$f0
2390
2391 I'm not using it because it's the same number of instructions as
2392 this branch-full form, and it has more serialized long latency
2393 instructions on the critical path.
2394
2395 For DFmode, we can avoid rounding errors by breaking up the word
2396 into two pieces, converting them separately, and adding them back:
2397
2398 LC0: .long 0,0x5f800000
2399
2400 itoft $16,$f11
2401 lda $2,LC0
2402 cmplt $16,0,$1
2403 cpyse $f11,$f31,$f10
2404 cpyse $f31,$f11,$f11
2405 s4addq $1,$2,$1
2406 lds $f12,0($1)
2407 cvtqt $f10,$f10
2408 cvtqt $f11,$f11
2409 addt $f12,$f10,$f0
2410 addt $f0,$f11,$f0
2411
2412 This doesn't seem to be a clear-cut win over the optabs form.
2413 It probably all depends on the distribution of numbers being
2414 converted -- in the optabs form, everything but the high-bit-set case has a
2415 much lower minimum execution time. */
2416
2417 void
2418 alpha_emit_floatuns (rtx operands[2])
2419 {
2420 rtx neglab, donelab, i0, i1, f0, in, out;
2421 enum machine_mode mode;
2422
2423 out = operands[0];
2424 in = force_reg (DImode, operands[1]);
2425 mode = GET_MODE (out);
2426 neglab = gen_label_rtx ();
2427 donelab = gen_label_rtx ();
2428 i0 = gen_reg_rtx (DImode);
2429 i1 = gen_reg_rtx (DImode);
2430 f0 = gen_reg_rtx (mode);
2431
2432 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2433
2434 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2435 emit_jump_insn (gen_jump (donelab));
2436 emit_barrier ();
2437
2438 emit_label (neglab);
2439
2440 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2441 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2442 emit_insn (gen_iordi3 (i0, i0, i1));
2443 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2444 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2445
2446 emit_label (donelab);
2447 }
2448
2449 /* Generate the comparison for a conditional branch. */
2450
2451 void
2452 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2453 {
2454 enum rtx_code cmp_code, branch_code;
2455 enum machine_mode branch_mode = VOIDmode;
2456 enum rtx_code code = GET_CODE (operands[0]);
2457 rtx op0 = operands[1], op1 = operands[2];
2458 rtx tem;
2459
2460 if (cmp_mode == TFmode)
2461 {
2462 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2463 op1 = const0_rtx;
2464 cmp_mode = DImode;
2465 }
2466
2467 /* The general case: fold the comparison code to the types of compares
2468 that we have, choosing the branch as necessary. */
2469 switch (code)
2470 {
2471 case EQ: case LE: case LT: case LEU: case LTU:
2472 case UNORDERED:
2473 /* We have these compares: */
2474 cmp_code = code, branch_code = NE;
2475 break;
2476
2477 case NE:
2478 case ORDERED:
2479 /* These must be reversed. */
2480 cmp_code = reverse_condition (code), branch_code = EQ;
2481 break;
2482
2483 case GE: case GT: case GEU: case GTU:
2484 /* For FP, we swap them, for INT, we reverse them. */
2485 if (cmp_mode == DFmode)
2486 {
2487 cmp_code = swap_condition (code);
2488 branch_code = NE;
2489 tem = op0, op0 = op1, op1 = tem;
2490 }
2491 else
2492 {
2493 cmp_code = reverse_condition (code);
2494 branch_code = EQ;
2495 }
2496 break;
2497
2498 default:
2499 gcc_unreachable ();
2500 }
2501
2502 if (cmp_mode == DFmode)
2503 {
2504 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2505 {
2506 /* When we are not as concerned about non-finite values, and we
2507 are comparing against zero, we can branch directly. */
2508 if (op1 == CONST0_RTX (DFmode))
2509 cmp_code = UNKNOWN, branch_code = code;
2510 else if (op0 == CONST0_RTX (DFmode))
2511 {
2512 /* Undo the swap we probably did just above. */
2513 tem = op0, op0 = op1, op1 = tem;
2514 branch_code = swap_condition (cmp_code);
2515 cmp_code = UNKNOWN;
2516 }
2517 }
2518 else
2519 {
2520 /* ??? We mark the branch mode to be CCmode to prevent the
2521 compare and branch from being combined, since the compare
2522 insn follows IEEE rules that the branch does not. */
2523 branch_mode = CCmode;
2524 }
2525 }
2526 else
2527 {
2528 /* The following optimizations are only for signed compares. */
2529 if (code != LEU && code != LTU && code != GEU && code != GTU)
2530 {
2531 /* Whee. Compare and branch against 0 directly. */
2532 if (op1 == const0_rtx)
2533 cmp_code = UNKNOWN, branch_code = code;
2534
2535 /* If the constant doesn't fit into an immediate, but can
2536 be generated by lda/ldah, we adjust the argument and
2537 compare against zero, so we can use beq/bne directly. */
2538 /* ??? Don't do this when comparing against symbols, otherwise
2539 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2540 be declared false out of hand (at least for non-weak). */
2541 else if (CONST_INT_P (op1)
2542 && (code == EQ || code == NE)
2543 && !(symbolic_operand (op0, VOIDmode)
2544 || (REG_P (op0) && REG_POINTER (op0))))
2545 {
2546 rtx n_op1 = GEN_INT (-INTVAL (op1));
2547
2548 if (! satisfies_constraint_I (op1)
2549 && (satisfies_constraint_K (n_op1)
2550 || satisfies_constraint_L (n_op1)))
2551 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2552 }
2553 }
2554
2555 if (!reg_or_0_operand (op0, DImode))
2556 op0 = force_reg (DImode, op0);
2557 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2558 op1 = force_reg (DImode, op1);
2559 }
2560
2561 /* Emit an initial compare instruction, if necessary. */
2562 tem = op0;
2563 if (cmp_code != UNKNOWN)
2564 {
2565 tem = gen_reg_rtx (cmp_mode);
2566 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2567 }
2568
2569 /* Emit the branch instruction. */
2570 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2571 gen_rtx_IF_THEN_ELSE (VOIDmode,
2572 gen_rtx_fmt_ee (branch_code,
2573 branch_mode, tem,
2574 CONST0_RTX (cmp_mode)),
2575 gen_rtx_LABEL_REF (VOIDmode,
2576 operands[3]),
2577 pc_rtx));
2578 emit_jump_insn (tem);
2579 }
2580
2581 /* Certain simplifications can be done to make invalid setcc operations
2582 valid. Return true if the setcc has been emitted, false if not. */
2583
2584 bool
2585 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2586 {
2587 enum rtx_code cmp_code;
2588 enum rtx_code code = GET_CODE (operands[1]);
2589 rtx op0 = operands[2], op1 = operands[3];
2590 rtx tmp;
2591
2592 if (cmp_mode == TFmode)
2593 {
2594 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2595 op1 = const0_rtx;
2596 cmp_mode = DImode;
2597 }
2598
2599 if (cmp_mode == DFmode && !TARGET_FIX)
2600 return 0;
2601
2602 /* The general case: fold the comparison code to the types of compares
2603 that we have, choosing the branch as necessary. */
2604
2605 cmp_code = UNKNOWN;
2606 switch (code)
2607 {
2608 case EQ: case LE: case LT: case LEU: case LTU:
2609 case UNORDERED:
2610 /* We have these compares. */
2611 if (cmp_mode == DFmode)
2612 cmp_code = code, code = NE;
2613 break;
2614
2615 case NE:
2616 if (cmp_mode == DImode && op1 == const0_rtx)
2617 break;
2618 /* FALLTHRU */
2619
2620 case ORDERED:
2621 cmp_code = reverse_condition (code);
2622 code = EQ;
2623 break;
2624
2625 case GE: case GT: case GEU: case GTU:
2626 /* These normally need swapping, but for integer zero we have
2627 special patterns that recognize swapped operands. */
2628 if (cmp_mode == DImode && op1 == const0_rtx)
2629 break;
2630 code = swap_condition (code);
2631 if (cmp_mode == DFmode)
2632 cmp_code = code, code = NE;
2633 tmp = op0, op0 = op1, op1 = tmp;
2634 break;
2635
2636 default:
2637 gcc_unreachable ();
2638 }
2639
2640 if (cmp_mode == DImode)
2641 {
2642 if (!register_operand (op0, DImode))
2643 op0 = force_reg (DImode, op0);
2644 if (!reg_or_8bit_operand (op1, DImode))
2645 op1 = force_reg (DImode, op1);
2646 }
2647
2648 /* Emit an initial compare instruction, if necessary. */
2649 if (cmp_code != UNKNOWN)
2650 {
2651 tmp = gen_reg_rtx (cmp_mode);
2652 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2653 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2654
2655 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2656 op1 = const0_rtx;
2657 }
2658
2659 /* Emit the setcc instruction. */
2660 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2661 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2662 return true;
2663 }
2664
2665
2666 /* Rewrite a comparison against zero CMP of the form
2667 (CODE (cc0) (const_int 0)) so it can be written validly in
2668 a conditional move (if_then_else CMP ...).
2669 If both of the operands that set cc0 are nonzero we must emit
2670 an insn to perform the compare (it can't be done within
2671 the conditional move). */
2672
2673 rtx
2674 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2675 {
2676 enum rtx_code code = GET_CODE (cmp);
2677 enum rtx_code cmov_code = NE;
2678 rtx op0 = XEXP (cmp, 0);
2679 rtx op1 = XEXP (cmp, 1);
2680 enum machine_mode cmp_mode
2681 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2682 enum machine_mode cmov_mode = VOIDmode;
2683 int local_fast_math = flag_unsafe_math_optimizations;
2684 rtx tem;
2685
2686 if (cmp_mode == TFmode)
2687 {
2688 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2689 op1 = const0_rtx;
2690 cmp_mode = DImode;
2691 }
2692
2693 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2694
2695 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2696 {
2697 enum rtx_code cmp_code;
2698
2699 if (! TARGET_FIX)
2700 return 0;
2701
2702 /* If we have fp<->int register move instructions, do a cmov by
2703 performing the comparison in fp registers, and move the
2704 zero/nonzero value to integer registers, where we can then
2705 use a normal cmov, or vice-versa. */
2706
2707 switch (code)
2708 {
2709 case EQ: case LE: case LT: case LEU: case LTU:
2710 /* We have these compares. */
2711 cmp_code = code, code = NE;
2712 break;
2713
2714 case NE:
2715 /* This must be reversed. */
2716 cmp_code = EQ, code = EQ;
2717 break;
2718
2719 case GE: case GT: case GEU: case GTU:
2720 /* These normally need swapping, but for integer zero we have
2721 special patterns that recognize swapped operands. */
2722 if (cmp_mode == DImode && op1 == const0_rtx)
2723 cmp_code = code, code = NE;
2724 else
2725 {
2726 cmp_code = swap_condition (code);
2727 code = NE;
2728 tem = op0, op0 = op1, op1 = tem;
2729 }
2730 break;
2731
2732 default:
2733 gcc_unreachable ();
2734 }
2735
2736 tem = gen_reg_rtx (cmp_mode);
2737 emit_insn (gen_rtx_SET (VOIDmode, tem,
2738 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2739 op0, op1)));
2740
2741 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2742 op0 = gen_lowpart (cmp_mode, tem);
2743 op1 = CONST0_RTX (cmp_mode);
2744 local_fast_math = 1;
2745 }
2746
2747 /* We may be able to use a conditional move directly.
2748 This avoids emitting spurious compares. */
2749 if (signed_comparison_operator (cmp, VOIDmode)
2750 && (cmp_mode == DImode || local_fast_math)
2751 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2752 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2753
2754 /* We can't put the comparison inside the conditional move;
2755 emit a compare instruction and put that inside the
2756 conditional move. Make sure we emit only comparisons we have;
2757 swap or reverse as necessary. */
2758
2759 if (!can_create_pseudo_p ())
2760 return NULL_RTX;
2761
2762 switch (code)
2763 {
2764 case EQ: case LE: case LT: case LEU: case LTU:
2765 /* We have these compares: */
2766 break;
2767
2768 case NE:
2769 /* This must be reversed. */
2770 code = reverse_condition (code);
2771 cmov_code = EQ;
2772 break;
2773
2774 case GE: case GT: case GEU: case GTU:
2775 /* These must be swapped. */
2776 if (op1 != CONST0_RTX (cmp_mode))
2777 {
2778 code = swap_condition (code);
2779 tem = op0, op0 = op1, op1 = tem;
2780 }
2781 break;
2782
2783 default:
2784 gcc_unreachable ();
2785 }
2786
2787 if (cmp_mode == DImode)
2788 {
2789 if (!reg_or_0_operand (op0, DImode))
2790 op0 = force_reg (DImode, op0);
2791 if (!reg_or_8bit_operand (op1, DImode))
2792 op1 = force_reg (DImode, op1);
2793 }
2794
2795 /* ??? We mark the branch mode to be CCmode to prevent the compare
2796 and cmov from being combined, since the compare insn follows IEEE
2797 rules that the cmov does not. */
2798 if (cmp_mode == DFmode && !local_fast_math)
2799 cmov_mode = CCmode;
2800
2801 tem = gen_reg_rtx (cmp_mode);
2802 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2803 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2804 }
2805
2806 /* Simplify a conditional move of two constants into a setcc with
2807 arithmetic. This is done with a splitter since combine would
2808 just undo the work if done during code generation. It also catches
2809 cases we wouldn't have before cse. */
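/* Two illustrative cases, added for clarity: (cond ? 8 : 0) becomes a
   setcc followed by a left shift of 3, and (cond ? 5 : 1) becomes a
   setcc followed by cmp * 4 + 1, matching the diff == 4 (s4addq)
   branch below.  */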
2810
2811 int
2812 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2813 rtx t_rtx, rtx f_rtx)
2814 {
2815 HOST_WIDE_INT t, f, diff;
2816 enum machine_mode mode;
2817 rtx target, subtarget, tmp;
2818
2819 mode = GET_MODE (dest);
2820 t = INTVAL (t_rtx);
2821 f = INTVAL (f_rtx);
2822 diff = t - f;
2823
2824 if (((code == NE || code == EQ) && diff < 0)
2825 || (code == GE || code == GT))
2826 {
2827 code = reverse_condition (code);
2828 diff = t, t = f, f = diff;
2829 diff = t - f;
2830 }
2831
2832 subtarget = target = dest;
2833 if (mode != DImode)
2834 {
2835 target = gen_lowpart (DImode, dest);
2836 if (can_create_pseudo_p ())
2837 subtarget = gen_reg_rtx (DImode);
2838 else
2839 subtarget = target;
2840 }
2841 /* Below, we must be careful to use copy_rtx on target and subtarget
2842 in intermediate insns, as they may be a subreg rtx, which may not
2843 be shared. */
2844
2845 if (f == 0 && exact_log2 (diff) > 0
2846 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2847 viable over a longer latency cmove. On EV5, the E0 slot is a
2848 scarce resource, and on EV4 shift has the same latency as a cmove. */
2849 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2850 {
2851 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2852 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2853
2854 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2855 GEN_INT (exact_log2 (t)));
2856 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2857 }
2858 else if (f == 0 && t == -1)
2859 {
2860 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2861 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2862
2863 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2864 }
2865 else if (diff == 1 || diff == 4 || diff == 8)
2866 {
2867 rtx add_op;
2868
2869 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2870 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2871
2872 if (diff == 1)
2873 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2874 else
2875 {
2876 add_op = GEN_INT (f);
2877 if (sext_add_operand (add_op, mode))
2878 {
2879 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2880 GEN_INT (diff));
2881 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2882 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2883 }
2884 else
2885 return 0;
2886 }
2887 }
2888 else
2889 return 0;
2890
2891 return 1;
2892 }
2893 \f
2894 /* Look up the X_floating library function name for the
2895 given operation. */
2896
2897 struct GTY(()) xfloating_op
2898 {
2899 const enum rtx_code code;
2900 const char *const GTY((skip)) osf_func;
2901 const char *const GTY((skip)) vms_func;
2902 rtx libcall;
2903 };
2904
2905 static GTY(()) struct xfloating_op xfloating_ops[] =
2906 {
2907 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2908 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2909 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2910 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2911 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2912 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2913 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2914 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2915 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2916 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2917 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2918 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2919 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2920 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2921 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2922 };
2923
2924 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2925 {
2926 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2927 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2928 };
2929
2930 static rtx
2931 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2932 {
2933 struct xfloating_op *ops = xfloating_ops;
2934 long n = ARRAY_SIZE (xfloating_ops);
2935 long i;
2936
2937 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2938
2939 /* How irritating. Nothing to key off for the main table. */
2940 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2941 {
2942 ops = vax_cvt_ops;
2943 n = ARRAY_SIZE (vax_cvt_ops);
2944 }
2945
2946 for (i = 0; i < n; ++i, ++ops)
2947 if (ops->code == code)
2948 {
2949 rtx func = ops->libcall;
2950 if (!func)
2951 {
2952 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2953 ? ops->vms_func : ops->osf_func);
2954 ops->libcall = func;
2955 }
2956 return func;
2957 }
2958
2959 gcc_unreachable ();
2960 }
2961
2962 /* Most X_floating operations take the rounding mode as an argument.
2963 Compute that here. */
2964
2965 static int
2966 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2967 enum alpha_fp_rounding_mode round)
2968 {
2969 int mode;
2970
2971 switch (round)
2972 {
2973 case ALPHA_FPRM_NORM:
2974 mode = 2;
2975 break;
2976 case ALPHA_FPRM_MINF:
2977 mode = 1;
2978 break;
2979 case ALPHA_FPRM_CHOP:
2980 mode = 0;
2981 break;
2982 case ALPHA_FPRM_DYN:
2983 mode = 4;
2984 break;
2985 default:
2986 gcc_unreachable ();
2987
2988 /* XXX For reference, round to +inf is mode = 3. */
2989 }
2990
2991 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2992 mode |= 0x10000;
2993
2994 return mode;
2995 }
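/* For example (an added note on the encoding above): FLOAT_TRUNCATE
   under ALPHA_FPRM_NORM rounding with ALPHA_FPTM_N trapping passes
   2 | 0x10000 = 0x10002 as the mode argument.  */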
2996
2997 /* Emit an X_floating library function call.
2998
2999 Note that these functions do not follow normal calling conventions:
3000 TFmode arguments are passed in two integer registers (as opposed to
3001 indirect); TFmode return values appear in R16+R17.
3002
3003 FUNC is the function to call.
3004 TARGET is where the output belongs.
3005 OPERANDS are the inputs.
3006 NOPERANDS is the count of inputs.
3007 EQUIV is the expression equivalent for the function.
3008 */
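/* An added illustration of the convention described above: for a
   two-operand TFmode routine such as _OtsAddX, the first operand is
   passed in $16/$17, the second in $18/$19, the rounding-mode literal
   in $20, and the TFmode result comes back in $16/$17.  */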
3009
3010 static void
3011 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3012 int noperands, rtx equiv)
3013 {
3014 rtx usage = NULL_RTX, tmp, reg;
3015 int regno = 16, i;
3016
3017 start_sequence ();
3018
3019 for (i = 0; i < noperands; ++i)
3020 {
3021 switch (GET_MODE (operands[i]))
3022 {
3023 case TFmode:
3024 reg = gen_rtx_REG (TFmode, regno);
3025 regno += 2;
3026 break;
3027
3028 case DFmode:
3029 reg = gen_rtx_REG (DFmode, regno + 32);
3030 regno += 1;
3031 break;
3032
3033 case VOIDmode:
3034 gcc_assert (CONST_INT_P (operands[i]));
3035 /* FALLTHRU */
3036 case DImode:
3037 reg = gen_rtx_REG (DImode, regno);
3038 regno += 1;
3039 break;
3040
3041 default:
3042 gcc_unreachable ();
3043 }
3044
3045 emit_move_insn (reg, operands[i]);
3046 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3047 }
3048
3049 switch (GET_MODE (target))
3050 {
3051 case TFmode:
3052 reg = gen_rtx_REG (TFmode, 16);
3053 break;
3054 case DFmode:
3055 reg = gen_rtx_REG (DFmode, 32);
3056 break;
3057 case DImode:
3058 reg = gen_rtx_REG (DImode, 0);
3059 break;
3060 default:
3061 gcc_unreachable ();
3062 }
3063
3064 tmp = gen_rtx_MEM (QImode, func);
3065 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3066 const0_rtx, const0_rtx));
3067 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3068 RTL_CONST_CALL_P (tmp) = 1;
3069
3070 tmp = get_insns ();
3071 end_sequence ();
3072
3073 emit_libcall_block (tmp, target, reg, equiv);
3074 }
3075
3076 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3077
3078 void
3079 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3080 {
3081 rtx func;
3082 int mode;
3083 rtx out_operands[3];
3084
3085 func = alpha_lookup_xfloating_lib_func (code);
3086 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3087
3088 out_operands[0] = operands[1];
3089 out_operands[1] = operands[2];
3090 out_operands[2] = GEN_INT (mode);
3091 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3092 gen_rtx_fmt_ee (code, TFmode, operands[1],
3093 operands[2]));
3094 }
3095
3096 /* Emit an X_floating library function call for a comparison. */
3097
3098 static rtx
3099 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3100 {
3101 enum rtx_code cmp_code, res_code;
3102 rtx func, out, operands[2], note;
3103
3104 /* X_floating library comparison functions return
3105 -1 unordered
3106 0 false
3107 1 true
3108 Convert the compare against the raw return value. */
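/* For instance (an added note): UNORDERED is handled by calling the
   EQ routine and testing for a negative result, since only an
   unordered pair of operands produces the -1 return value.  */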
3109
3110 cmp_code = *pcode;
3111 switch (cmp_code)
3112 {
3113 case UNORDERED:
3114 cmp_code = EQ;
3115 res_code = LT;
3116 break;
3117 case ORDERED:
3118 cmp_code = EQ;
3119 res_code = GE;
3120 break;
3121 case NE:
3122 res_code = NE;
3123 break;
3124 case EQ:
3125 case LT:
3126 case GT:
3127 case LE:
3128 case GE:
3129 res_code = GT;
3130 break;
3131 default:
3132 gcc_unreachable ();
3133 }
3134 *pcode = res_code;
3135
3136 func = alpha_lookup_xfloating_lib_func (cmp_code);
3137
3138 operands[0] = op0;
3139 operands[1] = op1;
3140 out = gen_reg_rtx (DImode);
3141
3142 /* What's actually returned is -1,0,1, not a proper boolean value,
3143 so use an EXPR_LIST as with a generic libcall instead of a
3144 comparison type expression. */
3145 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3146 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3147 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3148 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3149
3150 return out;
3151 }
3152
3153 /* Emit an X_floating library function call for a conversion. */
3154
3155 void
3156 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3157 {
3158 int noperands = 1, mode;
3159 rtx out_operands[2];
3160 rtx func;
3161 enum rtx_code code = orig_code;
3162
3163 if (code == UNSIGNED_FIX)
3164 code = FIX;
3165
3166 func = alpha_lookup_xfloating_lib_func (code);
3167
3168 out_operands[0] = operands[1];
3169
3170 switch (code)
3171 {
3172 case FIX:
3173 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3174 out_operands[1] = GEN_INT (mode);
3175 noperands = 2;
3176 break;
3177 case FLOAT_TRUNCATE:
3178 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3179 out_operands[1] = GEN_INT (mode);
3180 noperands = 2;
3181 break;
3182 default:
3183 break;
3184 }
3185
3186 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3187 gen_rtx_fmt_e (orig_code,
3188 GET_MODE (operands[0]),
3189 operands[1]));
3190 }
3191
3192 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3193 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3194 guarantee that the sequence
3195 set (OP[0] OP[2])
3196 set (OP[1] OP[3])
3197 is valid. Naturally, output operand ordering is little-endian.
3198 This is used by *movtf_internal and *movti_internal. */
3199
3200 void
3201 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3202 bool fixup_overlap)
3203 {
3204 switch (GET_CODE (operands[1]))
3205 {
3206 case REG:
3207 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3208 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3209 break;
3210
3211 case MEM:
3212 operands[3] = adjust_address (operands[1], DImode, 8);
3213 operands[2] = adjust_address (operands[1], DImode, 0);
3214 break;
3215
3216 case CONST_INT:
3217 case CONST_DOUBLE:
3218 gcc_assert (operands[1] == CONST0_RTX (mode));
3219 operands[2] = operands[3] = const0_rtx;
3220 break;
3221
3222 default:
3223 gcc_unreachable ();
3224 }
3225
3226 switch (GET_CODE (operands[0]))
3227 {
3228 case REG:
3229 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3230 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3231 break;
3232
3233 case MEM:
3234 operands[1] = adjust_address (operands[0], DImode, 8);
3235 operands[0] = adjust_address (operands[0], DImode, 0);
3236 break;
3237
3238 default:
3239 gcc_unreachable ();
3240 }
3241
3242 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3243 {
3244 rtx tmp;
3245 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3246 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3247 }
3248 }
3249
3250 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3251 op2 is a register containing the sign bit, operation is the
3252 logical operation to be performed. */
3253
3254 void
3255 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3256 {
3257 rtx high_bit = operands[2];
3258 rtx scratch;
3259 int move;
3260
3261 alpha_split_tmode_pair (operands, TFmode, false);
3262
3263 /* Detect three flavors of operand overlap. */
3264 move = 1;
3265 if (rtx_equal_p (operands[0], operands[2]))
3266 move = 0;
3267 else if (rtx_equal_p (operands[1], operands[2]))
3268 {
3269 if (rtx_equal_p (operands[0], high_bit))
3270 move = 2;
3271 else
3272 move = -1;
3273 }
3274
3275 if (move < 0)
3276 emit_move_insn (operands[0], operands[2]);
3277
3278 /* ??? If the destination overlaps both source tf and high_bit, then
3279 assume source tf is dead in its entirety and use the other half
3280 for a scratch register. Otherwise "scratch" is just the proper
3281 destination register. */
3282 scratch = operands[move < 2 ? 1 : 3];
3283
3284 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3285
3286 if (move > 0)
3287 {
3288 emit_move_insn (operands[0], operands[2]);
3289 if (move > 1)
3290 emit_move_insn (operands[1], scratch);
3291 }
3292 }
3293 \f
3294 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3295 unaligned data:
3296
3297 unsigned: signed:
3298 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3299 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3300 lda r3,X(r11) lda r3,X+2(r11)
3301 extwl r1,r3,r1 extql r1,r3,r1
3302 extwh r2,r3,r2 extqh r2,r3,r2
3303 or r1,r2,r1 or r1,r2,r1
3304 sra r1,48,r1
3305
3306 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3307 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3308 lda r3,X(r11) lda r3,X(r11)
3309 extll r1,r3,r1 extll r1,r3,r1
3310 extlh r2,r3,r2 extlh r2,r3,r2
3311 or r1,r2,r1 addl r1,r2,r1
3312
3313 quad: ldq_u r1,X(r11)
3314 ldq_u r2,X+7(r11)
3315 lda r3,X(r11)
3316 extql r1,r3,r1
3317 extqh r2,r3,r2
3318 or r1,r2,r1
3319 */
3320
3321 void
3322 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3323 HOST_WIDE_INT ofs, int sign)
3324 {
3325 rtx meml, memh, addr, extl, exth, tmp, mema;
3326 enum machine_mode mode;
3327
3328 if (TARGET_BWX && size == 2)
3329 {
3330 meml = adjust_address (mem, QImode, ofs);
3331 memh = adjust_address (mem, QImode, ofs+1);
3332 if (BYTES_BIG_ENDIAN)
3333 tmp = meml, meml = memh, memh = tmp;
3334 extl = gen_reg_rtx (DImode);
3335 exth = gen_reg_rtx (DImode);
3336 emit_insn (gen_zero_extendqidi2 (extl, meml));
3337 emit_insn (gen_zero_extendqidi2 (exth, memh));
3338 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3339 NULL, 1, OPTAB_LIB_WIDEN);
3340 addr = expand_simple_binop (DImode, IOR, extl, exth,
3341 NULL, 1, OPTAB_LIB_WIDEN);
3342
3343 if (sign && GET_MODE (tgt) != HImode)
3344 {
3345 addr = gen_lowpart (HImode, addr);
3346 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3347 }
3348 else
3349 {
3350 if (GET_MODE (tgt) != DImode)
3351 addr = gen_lowpart (GET_MODE (tgt), addr);
3352 emit_move_insn (tgt, addr);
3353 }
3354 return;
3355 }
3356
3357 meml = gen_reg_rtx (DImode);
3358 memh = gen_reg_rtx (DImode);
3359 addr = gen_reg_rtx (DImode);
3360 extl = gen_reg_rtx (DImode);
3361 exth = gen_reg_rtx (DImode);
3362
3363 mema = XEXP (mem, 0);
3364 if (GET_CODE (mema) == LO_SUM)
3365 mema = force_reg (Pmode, mema);
3366
3367 /* AND addresses cannot be in any alias set, since they may implicitly
3368 alias surrounding code. Ideally we'd have some alias set that
3369 covered all types except those with alignment 8 or higher. */
3370
3371 tmp = change_address (mem, DImode,
3372 gen_rtx_AND (DImode,
3373 plus_constant (mema, ofs),
3374 GEN_INT (-8)));
3375 set_mem_alias_set (tmp, 0);
3376 emit_move_insn (meml, tmp);
3377
3378 tmp = change_address (mem, DImode,
3379 gen_rtx_AND (DImode,
3380 plus_constant (mema, ofs + size - 1),
3381 GEN_INT (-8)));
3382 set_mem_alias_set (tmp, 0);
3383 emit_move_insn (memh, tmp);
3384
3385 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3386 {
3387 emit_move_insn (addr, plus_constant (mema, -1));
3388
3389 emit_insn (gen_extqh_be (extl, meml, addr));
3390 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3391
3392 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3393 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3394 addr, 1, OPTAB_WIDEN);
3395 }
3396 else if (sign && size == 2)
3397 {
3398 emit_move_insn (addr, plus_constant (mema, ofs+2));
3399
3400 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3401 emit_insn (gen_extqh_le (exth, memh, addr));
3402
3403 /* We must use tgt here for the target. Alpha-vms port fails if we use
3404 addr for the target, because addr is marked as a pointer and combine
3405 knows that pointers are always sign-extended 32-bit values. */
3406 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3407 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3408 addr, 1, OPTAB_WIDEN);
3409 }
3410 else
3411 {
3412 if (WORDS_BIG_ENDIAN)
3413 {
3414 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3415 switch ((int) size)
3416 {
3417 case 2:
3418 emit_insn (gen_extwh_be (extl, meml, addr));
3419 mode = HImode;
3420 break;
3421
3422 case 4:
3423 emit_insn (gen_extlh_be (extl, meml, addr));
3424 mode = SImode;
3425 break;
3426
3427 case 8:
3428 emit_insn (gen_extqh_be (extl, meml, addr));
3429 mode = DImode;
3430 break;
3431
3432 default:
3433 gcc_unreachable ();
3434 }
3435 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3436 }
3437 else
3438 {
3439 emit_move_insn (addr, plus_constant (mema, ofs));
3440 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3441 switch ((int) size)
3442 {
3443 case 2:
3444 emit_insn (gen_extwh_le (exth, memh, addr));
3445 mode = HImode;
3446 break;
3447
3448 case 4:
3449 emit_insn (gen_extlh_le (exth, memh, addr));
3450 mode = SImode;
3451 break;
3452
3453 case 8:
3454 emit_insn (gen_extqh_le (exth, memh, addr));
3455 mode = DImode;
3456 break;
3457
3458 default:
3459 gcc_unreachable ();
3460 }
3461 }
3462
3463 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3464 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3465 sign, OPTAB_WIDEN);
3466 }
3467
3468 if (addr != tgt)
3469 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3470 }
3471
3472 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3473
3474 void
3475 alpha_expand_unaligned_store (rtx dst, rtx src,
3476 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3477 {
3478 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3479
3480 if (TARGET_BWX && size == 2)
3481 {
3482 if (src != const0_rtx)
3483 {
3484 dstl = gen_lowpart (QImode, src);
3485 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3486 NULL, 1, OPTAB_LIB_WIDEN);
3487 dsth = gen_lowpart (QImode, dsth);
3488 }
3489 else
3490 dstl = dsth = const0_rtx;
3491
3492 meml = adjust_address (dst, QImode, ofs);
3493 memh = adjust_address (dst, QImode, ofs+1);
3494 if (BYTES_BIG_ENDIAN)
3495 addr = meml, meml = memh, memh = addr;
3496
3497 emit_move_insn (meml, dstl);
3498 emit_move_insn (memh, dsth);
3499 return;
3500 }
3501
3502 dstl = gen_reg_rtx (DImode);
3503 dsth = gen_reg_rtx (DImode);
3504 insl = gen_reg_rtx (DImode);
3505 insh = gen_reg_rtx (DImode);
3506
3507 dsta = XEXP (dst, 0);
3508 if (GET_CODE (dsta) == LO_SUM)
3509 dsta = force_reg (Pmode, dsta);
3510
3511 /* AND addresses cannot be in any alias set, since they may implicitly
3512 alias surrounding code. Ideally we'd have some alias set that
3513 covered all types except those with alignment 8 or higher. */
3514
3515 meml = change_address (dst, DImode,
3516 gen_rtx_AND (DImode,
3517 plus_constant (dsta, ofs),
3518 GEN_INT (-8)));
3519 set_mem_alias_set (meml, 0);
3520
3521 memh = change_address (dst, DImode,
3522 gen_rtx_AND (DImode,
3523 plus_constant (dsta, ofs + size - 1),
3524 GEN_INT (-8)));
3525 set_mem_alias_set (memh, 0);
3526
3527 emit_move_insn (dsth, memh);
3528 emit_move_insn (dstl, meml);
3529 if (WORDS_BIG_ENDIAN)
3530 {
3531 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3532
3533 if (src != const0_rtx)
3534 {
3535 switch ((int) size)
3536 {
3537 case 2:
3538 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3539 break;
3540 case 4:
3541 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3542 break;
3543 case 8:
3544 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3545 break;
3546 }
3547 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3548 GEN_INT (size*8), addr));
3549 }
3550
3551 switch ((int) size)
3552 {
3553 case 2:
3554 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3555 break;
3556 case 4:
3557 {
3558 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3559 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3560 break;
3561 }
3562 case 8:
3563 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3564 break;
3565 }
3566
3567 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3568 }
3569 else
3570 {
3571 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3572
3573 if (src != CONST0_RTX (GET_MODE (src)))
3574 {
3575 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3576 GEN_INT (size*8), addr));
3577
3578 switch ((int) size)
3579 {
3580 case 2:
3581 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3582 break;
3583 case 4:
3584 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3585 break;
3586 case 8:
3587 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3588 break;
3589 }
3590 }
3591
3592 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3593
3594 switch ((int) size)
3595 {
3596 case 2:
3597 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3598 break;
3599 case 4:
3600 {
3601 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3602 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3603 break;
3604 }
3605 case 8:
3606 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3607 break;
3608 }
3609 }
3610
3611 if (src != CONST0_RTX (GET_MODE (src)))
3612 {
3613 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3614 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3615 }
3616
3617 if (WORDS_BIG_ENDIAN)
3618 {
3619 emit_move_insn (meml, dstl);
3620 emit_move_insn (memh, dsth);
3621 }
3622 else
3623 {
3624 /* Must store high before low for degenerate case of aligned. */
3625 emit_move_insn (memh, dsth);
3626 emit_move_insn (meml, dstl);
3627 }
3628 }
3629
3630 /* The block move code tries to maximize speed by separating loads and
3631 stores at the expense of register pressure: we load all of the data
3632 before we store it back out. Two secondary effects are worth
3633 mentioning: this speeds copying to/from aligned and unaligned
3634 buffers, and it makes the code significantly easier to write. */
3635
3636 #define MAX_MOVE_WORDS 8
3637
3638 /* Load an integral number of consecutive unaligned quadwords. */
3639
3640 static void
3641 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3642 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3643 {
3644 rtx const im8 = GEN_INT (-8);
3645 rtx const i64 = GEN_INT (64);
3646 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3647 rtx sreg, areg, tmp, smema;
3648 HOST_WIDE_INT i;
3649
3650 smema = XEXP (smem, 0);
3651 if (GET_CODE (smema) == LO_SUM)
3652 smema = force_reg (Pmode, smema);
3653
3654 /* Generate all the tmp registers we need. */
3655 for (i = 0; i < words; ++i)
3656 {
3657 data_regs[i] = out_regs[i];
3658 ext_tmps[i] = gen_reg_rtx (DImode);
3659 }
3660 data_regs[words] = gen_reg_rtx (DImode);
3661
3662 if (ofs != 0)
3663 smem = adjust_address (smem, GET_MODE (smem), ofs);
3664
3665 /* Load up all of the source data. */
3666 for (i = 0; i < words; ++i)
3667 {
3668 tmp = change_address (smem, DImode,
3669 gen_rtx_AND (DImode,
3670 plus_constant (smema, 8*i),
3671 im8));
3672 set_mem_alias_set (tmp, 0);
3673 emit_move_insn (data_regs[i], tmp);
3674 }
3675
3676 tmp = change_address (smem, DImode,
3677 gen_rtx_AND (DImode,
3678 plus_constant (smema, 8*words - 1),
3679 im8));
3680 set_mem_alias_set (tmp, 0);
3681 emit_move_insn (data_regs[words], tmp);
3682
3683 /* Extract the half-word fragments. Unfortunately DEC decided to make
3684 extxh with offset zero a noop instead of zeroing the register, so
3685 we must take care of that edge condition ourselves with cmov. */
3686
3687 sreg = copy_addr_to_reg (smema);
3688 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3689 1, OPTAB_WIDEN);
3690 if (WORDS_BIG_ENDIAN)
3691 emit_move_insn (sreg, plus_constant (sreg, 7));
3692 for (i = 0; i < words; ++i)
3693 {
3694 if (WORDS_BIG_ENDIAN)
3695 {
3696 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3697 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3698 }
3699 else
3700 {
3701 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3702 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3703 }
3704 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3705 gen_rtx_IF_THEN_ELSE (DImode,
3706 gen_rtx_EQ (DImode, areg,
3707 const0_rtx),
3708 const0_rtx, ext_tmps[i])));
3709 }
3710
3711 /* Merge the half-words into whole words. */
3712 for (i = 0; i < words; ++i)
3713 {
3714 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3715 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3716 }
3717 }
3718
3719 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3720 may be NULL to store zeros. */
3721
3722 static void
3723 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3724 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3725 {
3726 rtx const im8 = GEN_INT (-8);
3727 rtx const i64 = GEN_INT (64);
3728 rtx ins_tmps[MAX_MOVE_WORDS];
3729 rtx st_tmp_1, st_tmp_2, dreg;
3730 rtx st_addr_1, st_addr_2, dmema;
3731 HOST_WIDE_INT i;
3732
3733 dmema = XEXP (dmem, 0);
3734 if (GET_CODE (dmema) == LO_SUM)
3735 dmema = force_reg (Pmode, dmema);
3736
3737 /* Generate all the tmp registers we need. */
3738 if (data_regs != NULL)
3739 for (i = 0; i < words; ++i)
3740 ins_tmps[i] = gen_reg_rtx(DImode);
3741 st_tmp_1 = gen_reg_rtx(DImode);
3742 st_tmp_2 = gen_reg_rtx(DImode);
3743
3744 if (ofs != 0)
3745 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3746
3747 st_addr_2 = change_address (dmem, DImode,
3748 gen_rtx_AND (DImode,
3749 plus_constant (dmema, words*8 - 1),
3750 im8));
3751 set_mem_alias_set (st_addr_2, 0);
3752
3753 st_addr_1 = change_address (dmem, DImode,
3754 gen_rtx_AND (DImode, dmema, im8));
3755 set_mem_alias_set (st_addr_1, 0);
3756
3757 /* Load up the destination end bits. */
3758 emit_move_insn (st_tmp_2, st_addr_2);
3759 emit_move_insn (st_tmp_1, st_addr_1);
3760
3761 /* Shift the input data into place. */
3762 dreg = copy_addr_to_reg (dmema);
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (dreg, plus_constant (dreg, 7));
3765 if (data_regs != NULL)
3766 {
3767 for (i = words-1; i >= 0; --i)
3768 {
3769 if (WORDS_BIG_ENDIAN)
3770 {
3771 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3772 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3773 }
3774 else
3775 {
3776 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3777 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3778 }
3779 }
3780 for (i = words-1; i > 0; --i)
3781 {
3782 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3783 ins_tmps[i-1], ins_tmps[i-1], 1,
3784 OPTAB_WIDEN);
3785 }
3786 }
3787
3788 /* Split and merge the ends with the destination data. */
3789 if (WORDS_BIG_ENDIAN)
3790 {
3791 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3792 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3793 }
3794 else
3795 {
3796 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3797 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3798 }
3799
3800 if (data_regs != NULL)
3801 {
3802 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3803 st_tmp_2, 1, OPTAB_WIDEN);
3804 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3805 st_tmp_1, 1, OPTAB_WIDEN);
3806 }
3807
3808 /* Store it all. */
3809 if (WORDS_BIG_ENDIAN)
3810 emit_move_insn (st_addr_1, st_tmp_1);
3811 else
3812 emit_move_insn (st_addr_2, st_tmp_2);
3813 for (i = words-1; i > 0; --i)
3814 {
3815 rtx tmp = change_address (dmem, DImode,
3816 gen_rtx_AND (DImode,
3817 plus_constant(dmema,
3818 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3819 im8));
3820 set_mem_alias_set (tmp, 0);
3821 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3822 }
3823 if (WORDS_BIG_ENDIAN)
3824 emit_move_insn (st_addr_2, st_tmp_2);
3825 else
3826 emit_move_insn (st_addr_1, st_tmp_1);
3827 }
3828
3829
3830 /* Expand string/block move operations.
3831
3832 operands[0] is the pointer to the destination.
3833 operands[1] is the pointer to the source.
3834 operands[2] is the number of bytes to move.
3835 operands[3] is the alignment. */
3836
3837 int
3838 alpha_expand_block_move (rtx operands[])
3839 {
3840 rtx bytes_rtx = operands[2];
3841 rtx align_rtx = operands[3];
3842 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3843 HOST_WIDE_INT bytes = orig_bytes;
3844 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3845 HOST_WIDE_INT dst_align = src_align;
3846 rtx orig_src = operands[1];
3847 rtx orig_dst = operands[0];
3848 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3849 rtx tmp;
3850 unsigned int i, words, ofs, nregs = 0;
3851
3852 if (orig_bytes <= 0)
3853 return 1;
3854 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3855 return 0;
3856
3857 /* Look for additional alignment information from recorded register info. */
3858
3859 tmp = XEXP (orig_src, 0);
3860 if (REG_P (tmp))
3861 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3862 else if (GET_CODE (tmp) == PLUS
3863 && REG_P (XEXP (tmp, 0))
3864 && CONST_INT_P (XEXP (tmp, 1)))
3865 {
3866 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3867 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3868
3869 if (a > src_align)
3870 {
3871 if (a >= 64 && c % 8 == 0)
3872 src_align = 64;
3873 else if (a >= 32 && c % 4 == 0)
3874 src_align = 32;
3875 else if (a >= 16 && c % 2 == 0)
3876 src_align = 16;
3877 }
3878 }
3879
3880 tmp = XEXP (orig_dst, 0);
3881 if (REG_P (tmp))
3882 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3883 else if (GET_CODE (tmp) == PLUS
3884 && REG_P (XEXP (tmp, 0))
3885 && CONST_INT_P (XEXP (tmp, 1)))
3886 {
3887 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3888 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3889
3890 if (a > dst_align)
3891 {
3892 if (a >= 64 && c % 8 == 0)
3893 dst_align = 64;
3894 else if (a >= 32 && c % 4 == 0)
3895 dst_align = 32;
3896 else if (a >= 16 && c % 2 == 0)
3897 dst_align = 16;
3898 }
3899 }
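
/* As a worked example of the tests above: an address of the form REG + 4
   where REG is known to be 64-bit aligned has C % 8 != 0 but C % 4 == 0,
   so the recorded alignment is raised to 32 rather than 64 bits.  */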
3900
3901 ofs = 0;
3902 if (src_align >= 64 && bytes >= 8)
3903 {
3904 words = bytes / 8;
3905
3906 for (i = 0; i < words; ++i)
3907 data_regs[nregs + i] = gen_reg_rtx (DImode);
3908
3909 for (i = 0; i < words; ++i)
3910 emit_move_insn (data_regs[nregs + i],
3911 adjust_address (orig_src, DImode, ofs + i * 8));
3912
3913 nregs += words;
3914 bytes -= words * 8;
3915 ofs += words * 8;
3916 }
3917
3918 if (src_align >= 32 && bytes >= 4)
3919 {
3920 words = bytes / 4;
3921
3922 for (i = 0; i < words; ++i)
3923 data_regs[nregs + i] = gen_reg_rtx (SImode);
3924
3925 for (i = 0; i < words; ++i)
3926 emit_move_insn (data_regs[nregs + i],
3927 adjust_address (orig_src, SImode, ofs + i * 4));
3928
3929 nregs += words;
3930 bytes -= words * 4;
3931 ofs += words * 4;
3932 }
3933
3934 if (bytes >= 8)
3935 {
3936 words = bytes / 8;
3937
3938 for (i = 0; i < words+1; ++i)
3939 data_regs[nregs + i] = gen_reg_rtx (DImode);
3940
3941 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3942 words, ofs);
3943
3944 nregs += words;
3945 bytes -= words * 8;
3946 ofs += words * 8;
3947 }
3948
3949 if (! TARGET_BWX && bytes >= 4)
3950 {
3951 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3952 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3953 bytes -= 4;
3954 ofs += 4;
3955 }
3956
3957 if (bytes >= 2)
3958 {
3959 if (src_align >= 16)
3960 {
3961 do {
3962 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3963 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3964 bytes -= 2;
3965 ofs += 2;
3966 } while (bytes >= 2);
3967 }
3968 else if (! TARGET_BWX)
3969 {
3970 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3971 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3972 bytes -= 2;
3973 ofs += 2;
3974 }
3975 }
3976
3977 while (bytes > 0)
3978 {
3979 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3980 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3981 bytes -= 1;
3982 ofs += 1;
3983 }
3984
3985 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3986
3987 /* Now save it back out again. */
3988
3989 i = 0, ofs = 0;
3990
3991 /* Write out the data in whatever chunks reading the source allowed. */
3992 if (dst_align >= 64)
3993 {
3994 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3995 {
3996 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3997 data_regs[i]);
3998 ofs += 8;
3999 i++;
4000 }
4001 }
4002
4003 if (dst_align >= 32)
4004 {
4005 /* If the source has remaining DImode regs, write them out in
4006 two pieces. */
4007 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
4008 {
4009 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4010 NULL_RTX, 1, OPTAB_WIDEN);
4011
4012 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4013 gen_lowpart (SImode, data_regs[i]));
4014 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4015 gen_lowpart (SImode, tmp));
4016 ofs += 8;
4017 i++;
4018 }
4019
4020 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4021 {
4022 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4023 data_regs[i]);
4024 ofs += 4;
4025 i++;
4026 }
4027 }
4028
4029 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4030 {
4031 /* Write out a remaining block of words using unaligned methods. */
4032
4033 for (words = 1; i + words < nregs; words++)
4034 if (GET_MODE (data_regs[i + words]) != DImode)
4035 break;
4036
4037 if (words == 1)
4038 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4039 else
4040 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4041 words, ofs);
4042
4043 i += words;
4044 ofs += words * 8;
4045 }
4046
4047 /* Due to the above, this won't be aligned. */
4048 /* ??? If we have more than one of these, consider constructing full
4049 words in registers and using alpha_expand_unaligned_store_words. */
4050 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4051 {
4052 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4053 ofs += 4;
4054 i++;
4055 }
4056
4057 if (dst_align >= 16)
4058 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4059 {
4060 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4061 i++;
4062 ofs += 2;
4063 }
4064 else
4065 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4066 {
4067 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4068 i++;
4069 ofs += 2;
4070 }
4071
4072 /* The remainder must be byte copies. */
4073 while (i < nregs)
4074 {
4075 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4076 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4077 i++;
4078 ofs += 1;
4079 }
4080
4081 return 1;
4082 }
4083
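/* Expand string/block clear operations, the clearing counterpart of
   alpha_expand_block_move above.

   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */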
4084 int
4085 alpha_expand_block_clear (rtx operands[])
4086 {
4087 rtx bytes_rtx = operands[1];
4088 rtx align_rtx = operands[3];
4089 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4090 HOST_WIDE_INT bytes = orig_bytes;
4091 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4092 HOST_WIDE_INT alignofs = 0;
4093 rtx orig_dst = operands[0];
4094 rtx tmp;
4095 int i, words, ofs = 0;
4096
4097 if (orig_bytes <= 0)
4098 return 1;
4099 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4100 return 0;
4101
4102 /* Look for stricter alignment. */
4103 tmp = XEXP (orig_dst, 0);
4104 if (REG_P (tmp))
4105 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4106 else if (GET_CODE (tmp) == PLUS
4107 && REG_P (XEXP (tmp, 0))
4108 && CONST_INT_P (XEXP (tmp, 1)))
4109 {
4110 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4111 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4112
4113 if (a > align)
4114 {
4115 if (a >= 64)
4116 align = a, alignofs = 8 - c % 8;
4117 else if (a >= 32)
4118 align = a, alignofs = 4 - c % 4;
4119 else if (a >= 16)
4120 align = a, alignofs = 2 - c % 2;
4121 }
4122 }
4123
4124 /* Handle an unaligned prefix first. */
4125
4126 if (alignofs > 0)
4127 {
4128 #if HOST_BITS_PER_WIDE_INT >= 64
4129 /* Given that alignofs is bounded by align, the only time BWX could
4130 generate three stores is for a 7 byte fill. Prefer two individual
4131 stores over a load/mask/store sequence. */
4132 if ((!TARGET_BWX || alignofs == 7)
4133 && align >= 32
4134 && !(alignofs == 4 && bytes >= 4))
4135 {
4136 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4137 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4138 rtx mem, tmp;
4139 HOST_WIDE_INT mask;
4140
4141 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4142 set_mem_alias_set (mem, 0);
4143
4144 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4145 if (bytes < alignofs)
4146 {
4147 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4148 ofs += bytes;
4149 bytes = 0;
4150 }
4151 else
4152 {
4153 bytes -= alignofs;
4154 ofs += alignofs;
4155 }
4156 alignofs = 0;
4157
4158 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4159 NULL_RTX, 1, OPTAB_WIDEN);
4160
4161 emit_move_insn (mem, tmp);
4162 }
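
/* A worked example of the masking above: with ALIGN >= 64 and ALIGNOFS == 3,
   INV_ALIGNOFS is 5, so the mask 0x000000ffffffffff preserves the low five
   bytes of the containing quadword and the AND clears the three prefix
   bytes that belong to the region being cleared.  */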
4163 #endif
4164
4165 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4166 {
4167 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4168 bytes -= 1;
4169 ofs += 1;
4170 alignofs -= 1;
4171 }
4172 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4173 {
4174 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4175 bytes -= 2;
4176 ofs += 2;
4177 alignofs -= 2;
4178 }
4179 if (alignofs == 4 && bytes >= 4)
4180 {
4181 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4182 bytes -= 4;
4183 ofs += 4;
4184 alignofs = 0;
4185 }
4186
4187 /* If we've not used the extra lead alignment information by now,
4188 we won't be able to. Downgrade align to match what's left over. */
4189 if (alignofs > 0)
4190 {
4191 alignofs = alignofs & -alignofs;
4192 align = MIN (align, alignofs * BITS_PER_UNIT);
4193 }
4194 }
4195
4196 /* Handle a block of contiguous long-words. */
4197
4198 if (align >= 64 && bytes >= 8)
4199 {
4200 words = bytes / 8;
4201
4202 for (i = 0; i < words; ++i)
4203 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4204 const0_rtx);
4205
4206 bytes -= words * 8;
4207 ofs += words * 8;
4208 }
4209
4210 /* If the block is large and appropriately aligned, emit a single
4211 store followed by a sequence of stq_u insns. */
4212
4213 if (align >= 32 && bytes > 16)
4214 {
4215 rtx orig_dsta;
4216
4217 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4218 bytes -= 4;
4219 ofs += 4;
4220
4221 orig_dsta = XEXP (orig_dst, 0);
4222 if (GET_CODE (orig_dsta) == LO_SUM)
4223 orig_dsta = force_reg (Pmode, orig_dsta);
4224
4225 words = bytes / 8;
4226 for (i = 0; i < words; ++i)
4227 {
4228 rtx mem
4229 = change_address (orig_dst, DImode,
4230 gen_rtx_AND (DImode,
4231 plus_constant (orig_dsta, ofs + i*8),
4232 GEN_INT (-8)));
4233 set_mem_alias_set (mem, 0);
4234 emit_move_insn (mem, const0_rtx);
4235 }
4236
4237 /* Depending on the alignment, the first stq_u may have overlapped
4238 with the initial stl, which means that the last stq_u didn't
4239 write as much as it would appear. Leave those questionable bytes
4240 unaccounted for. */
4241 bytes -= words * 8 - 4;
4242 ofs += words * 8 - 4;
4243 }
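
/* Worked example of the adjustment above: only 32-bit alignment is known,
   28 bytes remain, and the destination happens to start on an 8-byte
   boundary at run time.  The stl covers bytes 0-3 and the three stq_u insns
   cover bytes 0-7, 8-15 and 16-23, so bytes 24-27 may be untouched; counting
   only words*8 - 4 = 20 extra bytes leaves those 4 for the trailing code.  */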
4244
4245 /* Handle a smaller block of aligned words. */
4246
4247 if ((align >= 64 && bytes == 4)
4248 || (align == 32 && bytes >= 4))
4249 {
4250 words = bytes / 4;
4251
4252 for (i = 0; i < words; ++i)
4253 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4254 const0_rtx);
4255
4256 bytes -= words * 4;
4257 ofs += words * 4;
4258 }
4259
4260 /* An unaligned block uses stq_u stores for as many whole quadwords as possible. */
4261
4262 if (bytes >= 8)
4263 {
4264 words = bytes / 8;
4265
4266 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4267
4268 bytes -= words * 8;
4269 ofs += words * 8;
4270 }
4271
4272 /* Next clean up any trailing pieces. */
4273
4274 #if HOST_BITS_PER_WIDE_INT >= 64
4275 /* Count the number of bits in BYTES for which aligned stores could
4276 be emitted. */
4277 words = 0;
4278 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4279 if (bytes & i)
4280 words += 1;
4281
4282 /* If we have appropriate alignment (and it wouldn't take too many
4283 instructions otherwise), clear the remaining bytes with a load/mask/store. */
4284 if (TARGET_BWX ? words > 2 : bytes > 0)
4285 {
4286 if (align >= 64)
4287 {
4288 rtx mem, tmp;
4289 HOST_WIDE_INT mask;
4290
4291 mem = adjust_address (orig_dst, DImode, ofs);
4292 set_mem_alias_set (mem, 0);
4293
4294 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4295
4296 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4297 NULL_RTX, 1, OPTAB_WIDEN);
4298
4299 emit_move_insn (mem, tmp);
4300 return 1;
4301 }
4302 else if (align >= 32 && bytes < 4)
4303 {
4304 rtx mem, tmp;
4305 HOST_WIDE_INT mask;
4306
4307 mem = adjust_address (orig_dst, SImode, ofs);
4308 set_mem_alias_set (mem, 0);
4309
4310 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4311
4312 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4313 NULL_RTX, 1, OPTAB_WIDEN);
4314
4315 emit_move_insn (mem, tmp);
4316 return 1;
4317 }
4318 }
4319 #endif
4320
4321 if (!TARGET_BWX && bytes >= 4)
4322 {
4323 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4324 bytes -= 4;
4325 ofs += 4;
4326 }
4327
4328 if (bytes >= 2)
4329 {
4330 if (align >= 16)
4331 {
4332 do {
4333 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4334 const0_rtx);
4335 bytes -= 2;
4336 ofs += 2;
4337 } while (bytes >= 2);
4338 }
4339 else if (! TARGET_BWX)
4340 {
4341 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4342 bytes -= 2;
4343 ofs += 2;
4344 }
4345 }
4346
4347 while (bytes > 0)
4348 {
4349 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4350 bytes -= 1;
4351 ofs += 1;
4352 }
4353
4354 return 1;
4355 }
4356
4357 /* Returns a mask so that zap(x, value) == x & mask. */
4358
4359 rtx
4360 alpha_expand_zap_mask (HOST_WIDE_INT value)
4361 {
4362 rtx result;
4363 int i;
4364
4365 if (HOST_BITS_PER_WIDE_INT >= 64)
4366 {
4367 HOST_WIDE_INT mask = 0;
4368
4369 for (i = 7; i >= 0; --i)
4370 {
4371 mask <<= 8;
4372 if (!((value >> i) & 1))
4373 mask |= 0xff;
4374 }
4375
4376 result = gen_int_mode (mask, DImode);
4377 }
4378 else
4379 {
4380 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4381
4382 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4383
4384 for (i = 7; i >= 4; --i)
4385 {
4386 mask_hi <<= 8;
4387 if (!((value >> i) & 1))
4388 mask_hi |= 0xff;
4389 }
4390
4391 for (i = 3; i >= 0; --i)
4392 {
4393 mask_lo <<= 8;
4394 if (!((value >> i) & 1))
4395 mask_lo |= 0xff;
4396 }
4397
4398 result = immed_double_const (mask_lo, mask_hi, DImode);
4399 }
4400
4401 return result;
4402 }
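
/* Stand-alone illustration of the mapping implemented above (a documentation
   sketch only, hence not compiled; it assumes a 64-bit host type):  */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Byte I of the result mask is 0xff exactly when bit I of VALUE is clear,
   so that "zap x, value" == "x & zap_mask (value)".  */
static uint64_t
zap_mask (unsigned int value)
{
  uint64_t mask = 0;
  int i;

  for (i = 7; i >= 0; --i)
    {
      mask <<= 8;
      if (!((value >> i) & 1))
	mask |= 0xff;
    }
  return mask;
}

int
main (void)
{
  /* Selector 0x0f clears the low four bytes, keeping only the high four.  */
  printf ("%016llx\n", (unsigned long long) zap_mask (0x0f));
  return 0;
}
#endif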
4403
4404 void
4405 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4406 enum machine_mode mode,
4407 rtx op0, rtx op1, rtx op2)
4408 {
4409 op0 = gen_lowpart (mode, op0);
4410
4411 if (op1 == const0_rtx)
4412 op1 = CONST0_RTX (mode);
4413 else
4414 op1 = gen_lowpart (mode, op1);
4415
4416 if (op2 == const0_rtx)
4417 op2 = CONST0_RTX (mode);
4418 else
4419 op2 = gen_lowpart (mode, op2);
4420
4421 emit_insn ((*gen) (op0, op1, op2));
4422 }
4423
4424 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4425 COND is true. Mark the jump as unlikely to be taken. */
4426
4427 static void
4428 emit_unlikely_jump (rtx cond, rtx label)
4429 {
4430 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4431 rtx x;
4432
4433 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4434 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4435 add_reg_note (x, REG_BR_PROB, very_unlikely);
4436 }
4437
4438 /* A subroutine of the atomic operation splitters. Emit a load-locked
4439 instruction in MODE. */
4440
4441 static void
4442 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4443 {
4444 rtx (*fn) (rtx, rtx) = NULL;
4445 if (mode == SImode)
4446 fn = gen_load_locked_si;
4447 else if (mode == DImode)
4448 fn = gen_load_locked_di;
4449 emit_insn (fn (reg, mem));
4450 }
4451
4452 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4453 instruction in MODE. */
4454
4455 static void
4456 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4457 {
4458 rtx (*fn) (rtx, rtx, rtx) = NULL;
4459 if (mode == SImode)
4460 fn = gen_store_conditional_si;
4461 else if (mode == DImode)
4462 fn = gen_store_conditional_di;
4463 emit_insn (fn (res, mem, val));
4464 }
4465
4466 /* A subroutine of the atomic operation splitters. Emit an insxl
4467 instruction in MODE. */
4468
4469 static rtx
4470 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4471 {
4472 rtx ret = gen_reg_rtx (DImode);
4473 rtx (*fn) (rtx, rtx, rtx);
4474
4475 if (WORDS_BIG_ENDIAN)
4476 {
4477 if (mode == QImode)
4478 fn = gen_insbl_be;
4479 else
4480 fn = gen_inswl_be;
4481 }
4482 else
4483 {
4484 if (mode == QImode)
4485 fn = gen_insbl_le;
4486 else
4487 fn = gen_inswl_le;
4488 }
4489 /* The insbl and inswl patterns require a register operand. */
4490 op1 = force_reg (mode, op1);
4491 emit_insn (fn (ret, op1, op2));
4492
4493 return ret;
4494 }
4495
4496 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4497 to perform. MEM is the memory on which to operate. VAL is the second
4498 operand of the binary operator. BEFORE and AFTER are optional locations to
4499 return the value of MEM either before or after the operation. SCRATCH is
4500 a scratch register. */
4501
4502 void
4503 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4504 rtx before, rtx after, rtx scratch)
4505 {
4506 enum machine_mode mode = GET_MODE (mem);
4507 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4508
4509 emit_insn (gen_memory_barrier ());
4510
4511 label = gen_label_rtx ();
4512 emit_label (label);
4513 label = gen_rtx_LABEL_REF (DImode, label);
4514
4515 if (before == NULL)
4516 before = scratch;
4517 emit_load_locked (mode, before, mem);
4518
4519 if (code == NOT)
4520 {
4521 x = gen_rtx_AND (mode, before, val);
4522 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4523
4524 x = gen_rtx_NOT (mode, val);
4525 }
4526 else
4527 x = gen_rtx_fmt_ee (code, mode, before, val);
4528 if (after)
4529 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4530 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4531
4532 emit_store_conditional (mode, cond, mem, scratch);
4533
4534 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4535 emit_unlikely_jump (x, label);
4536
4537 emit_insn (gen_memory_barrier ());
4538 }
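
/* For a 64-bit fetch-and-add, the sequence emitted above is roughly the
   following (register names are placeholders):

	mb
   $retry:
	ldq_l	$t, 0($addr)		# BEFORE <- *MEM, locked
	addq	$t, $val, $s		# apply CODE into SCRATCH
	stq_c	$s, 0($addr)		# SCRATCH <- store-conditional success
	beq	$s, $retry		# unlikely retry branch
	mb
*/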
4539
4540 /* Expand a compare and swap operation. */
4541
4542 void
4543 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4544 rtx scratch)
4545 {
4546 enum machine_mode mode = GET_MODE (mem);
4547 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4548
4549 emit_insn (gen_memory_barrier ());
4550
4551 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4552 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4553 emit_label (XEXP (label1, 0));
4554
4555 emit_load_locked (mode, retval, mem);
4556
4557 x = gen_lowpart (DImode, retval);
4558 if (oldval == const0_rtx)
4559 x = gen_rtx_NE (DImode, x, const0_rtx);
4560 else
4561 {
4562 x = gen_rtx_EQ (DImode, x, oldval);
4563 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4564 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4565 }
4566 emit_unlikely_jump (x, label2);
4567
4568 emit_move_insn (scratch, newval);
4569 emit_store_conditional (mode, cond, mem, scratch);
4570
4571 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4572 emit_unlikely_jump (x, label1);
4573
4574 emit_insn (gen_memory_barrier ());
4575 emit_label (XEXP (label2, 0));
4576 }
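
/* Roughly the shape of the emitted code for a DImode compare-and-swap with a
   nonzero OLDVAL (register names are placeholders):

	mb
   $again:
	ldq_l	$r, 0($addr)		# RETVAL <- *MEM, locked
	cmpeq	$r, $old, $c		# does it match OLDVAL?
	beq	$c, $done		# no: give up, unlikely branch
	mov	$new, $c		# SCRATCH <- NEWVAL
	stq_c	$c, 0($addr)		# conditional store, success flag in $c
	beq	$c, $again		# lost the reservation: retry
	mb
   $done:
*/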
4577
4578 void
4579 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4580 {
4581 enum machine_mode mode = GET_MODE (mem);
4582 rtx addr, align, wdst;
4583 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4584
4585 addr = force_reg (DImode, XEXP (mem, 0));
4586 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4587 NULL_RTX, 1, OPTAB_DIRECT);
4588
4589 oldval = convert_modes (DImode, mode, oldval, 1);
4590 newval = emit_insxl (mode, newval, addr);
4591
4592 wdst = gen_reg_rtx (DImode);
4593 if (mode == QImode)
4594 fn5 = gen_sync_compare_and_swapqi_1;
4595 else
4596 fn5 = gen_sync_compare_and_swaphi_1;
4597 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4598
4599 emit_move_insn (dst, gen_lowpart (mode, wdst));
4600 }
4601
4602 void
4603 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4604 rtx oldval, rtx newval, rtx align,
4605 rtx scratch, rtx cond)
4606 {
4607 rtx label1, label2, mem, width, mask, x;
4608
4609 mem = gen_rtx_MEM (DImode, align);
4610 MEM_VOLATILE_P (mem) = 1;
4611
4612 emit_insn (gen_memory_barrier ());
4613 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4614 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4615 emit_label (XEXP (label1, 0));
4616
4617 emit_load_locked (DImode, scratch, mem);
4618
4619 width = GEN_INT (GET_MODE_BITSIZE (mode));
4620 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4621 if (WORDS_BIG_ENDIAN)
4622 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4623 else
4624 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4625
4626 if (oldval == const0_rtx)
4627 x = gen_rtx_NE (DImode, dest, const0_rtx);
4628 else
4629 {
4630 x = gen_rtx_EQ (DImode, dest, oldval);
4631 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4632 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4633 }
4634 emit_unlikely_jump (x, label2);
4635
4636 if (WORDS_BIG_ENDIAN)
4637 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4638 else
4639 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4640 emit_insn (gen_iordi3 (scratch, scratch, newval));
4641
4642 emit_store_conditional (DImode, scratch, mem, scratch);
4643
4644 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4645 emit_unlikely_jump (x, label1);
4646
4647 emit_insn (gen_memory_barrier ());
4648 emit_label (XEXP (label2, 0));
4649 }
4650
4651 /* Expand an atomic exchange operation. */
4652
4653 void
4654 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4655 {
4656 enum machine_mode mode = GET_MODE (mem);
4657 rtx label, x, cond = gen_lowpart (DImode, scratch);
4658
4659 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4660 emit_label (XEXP (label, 0));
4661
4662 emit_load_locked (mode, retval, mem);
4663 emit_move_insn (scratch, val);
4664 emit_store_conditional (mode, cond, mem, scratch);
4665
4666 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4667 emit_unlikely_jump (x, label);
4668
4669 emit_insn (gen_memory_barrier ());
4670 }
4671
4672 void
4673 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4674 {
4675 enum machine_mode mode = GET_MODE (mem);
4676 rtx addr, align, wdst;
4677 rtx (*fn4) (rtx, rtx, rtx, rtx);
4678
4679 /* Force the address into a register. */
4680 addr = force_reg (DImode, XEXP (mem, 0));
4681
4682 /* Align it to a multiple of 8. */
4683 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4684 NULL_RTX, 1, OPTAB_DIRECT);
4685
4686 /* Insert val into the correct byte location within the word. */
4687 val = emit_insxl (mode, val, addr);
4688
4689 wdst = gen_reg_rtx (DImode);
4690 if (mode == QImode)
4691 fn4 = gen_sync_lock_test_and_setqi_1;
4692 else
4693 fn4 = gen_sync_lock_test_and_sethi_1;
4694 emit_insn (fn4 (wdst, addr, val, align));
4695
4696 emit_move_insn (dst, gen_lowpart (mode, wdst));
4697 }
4698
4699 void
4700 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4701 rtx val, rtx align, rtx scratch)
4702 {
4703 rtx label, mem, width, mask, x;
4704
4705 mem = gen_rtx_MEM (DImode, align);
4706 MEM_VOLATILE_P (mem) = 1;
4707
4708 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4709 emit_label (XEXP (label, 0));
4710
4711 emit_load_locked (DImode, scratch, mem);
4712
4713 width = GEN_INT (GET_MODE_BITSIZE (mode));
4714 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4715 if (WORDS_BIG_ENDIAN)
4716 {
4717 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4718 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4719 }
4720 else
4721 {
4722 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4723 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4724 }
4725 emit_insn (gen_iordi3 (scratch, scratch, val));
4726
4727 emit_store_conditional (DImode, scratch, mem, scratch);
4728
4729 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4730 emit_unlikely_jump (x, label);
4731
4732 emit_insn (gen_memory_barrier ());
4733 }
4734 \f
4735 /* Adjust the cost of a scheduling dependency. Return the new cost of
4736 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4737
4738 static int
4739 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4740 {
4741 enum attr_type dep_insn_type;
4742
4743 /* If the dependence is an anti-dependence, there is no cost. For an
4744 output dependence, there is sometimes a cost, but it doesn't seem
4745 worth handling those few cases. */
4746 if (REG_NOTE_KIND (link) != 0)
4747 return cost;
4748
4749 /* If we can't recognize the insns, we can't really do anything. */
4750 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4751 return cost;
4752
4753 dep_insn_type = get_attr_type (dep_insn);
4754
4755 /* Bring in the user-defined memory latency. */
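/* (With the default alpha_memory_latency of 3 this charges two extra
   cycles for using the result of a load.)  */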
4756 if (dep_insn_type == TYPE_ILD
4757 || dep_insn_type == TYPE_FLD
4758 || dep_insn_type == TYPE_LDSYM)
4759 cost += alpha_memory_latency-1;
4760
4761 /* Everything else is handled in DFA bypasses now. */
4762
4763 return cost;
4764 }
4765
4766 /* The number of instructions that can be issued per cycle. */
4767
4768 static int
4769 alpha_issue_rate (void)
4770 {
4771 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4772 }
4773
4774 /* How many alternative schedules to try. This should be as wide as the
4775 scheduling freedom in the DFA, but no wider. Making this value too
4776 large results in extra work for the scheduler.
4777
4778 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4779 alternative schedules. For EV5, we can choose between E0/E1 and
4780 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4781
4782 static int
4783 alpha_multipass_dfa_lookahead (void)
4784 {
4785 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4786 }
4787 \f
4788 /* Machine-specific function data. */
4789
4790 struct GTY(()) machine_function
4791 {
4792 /* For unicosmk. */
4793 /* List of call information words for calls from this function. */
4794 struct rtx_def *first_ciw;
4795 struct rtx_def *last_ciw;
4796 int ciw_count;
4797
4798 /* List of deferred case vectors. */
4799 struct rtx_def *addr_list;
4800
4801 /* For OSF. */
4802 const char *some_ld_name;
4803
4804 /* For TARGET_LD_BUGGY_LDGP. */
4805 struct rtx_def *gp_save_rtx;
4806
4807 /* For VMS condition handlers. */
4808 bool uses_condition_handler;
4809 };
4810
4811 /* How to allocate a 'struct machine_function'. */
4812
4813 static struct machine_function *
4814 alpha_init_machine_status (void)
4815 {
4816 return ggc_alloc_cleared_machine_function ();
4817 }
4818
4819 /* Support for frame-based VMS condition handlers. */
4820
4821 /* A VMS condition handler may be established for a function with a call to
4822 __builtin_establish_vms_condition_handler, and cancelled with a call to
4823 __builtin_revert_vms_condition_handler.
4824
4825 The VMS Condition Handling Facility knows about the existence of a handler
4826 from the procedure descriptor .handler field. Like the VMS native compilers,
4827 we store the user-specified handler's address at a fixed location in the
4828 stack frame and point the procedure descriptor at a common wrapper which
4829 fetches the real handler's address and issues an indirect call.
4830
4831 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4832
4833 We force the procedure kind to PT_STACK, and the fixed frame location is
4834 fp+8, just before the register save area. We use the handler_data field in
4835 the procedure descriptor to state the fp offset at which the installed
4836 handler address can be found. */
4837
4838 #define VMS_COND_HANDLER_FP_OFFSET 8
4839
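/* Hypothetical user-level shape of the facility; the builtin prototypes are
   not defined in this file and are only sketched here:  */
#if 0
extern long my_vms_handler (void *sigargs, void *mechargs);

void
protected_region (void)
{
  __builtin_establish_vms_condition_handler (my_vms_handler);
  /* ... code executed with my_vms_handler established ... */
  __builtin_revert_vms_condition_handler ();
}
#endif
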
4840 /* Expand code to store the currently installed user VMS condition handler
4841 into TARGET and install HANDLER as the new condition handler. */
4842
4843 void
4844 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4845 {
4846 rtx handler_slot_address
4847 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4848
4849 rtx handler_slot
4850 = gen_rtx_MEM (DImode, handler_slot_address);
4851
4852 emit_move_insn (target, handler_slot);
4853 emit_move_insn (handler_slot, handler);
4854
4855 /* Notify the start/prologue/epilogue emitters that the condition handler
4856 slot is needed. In addition to reserving the slot space, this will force
4857 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4858 use above is correct. */
4859 cfun->machine->uses_condition_handler = true;
4860 }
4861
4862 /* Expand code to store the current VMS condition handler into TARGET and
4863 nullify it. */
4864
4865 void
4866 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4867 {
4868 /* We implement this by establishing a null condition handler, with the tiny
4869 side effect of setting uses_condition_handler. This is a little bit
4870 pessimistic if no actual builtin_establish call is ever issued, which is
4871 not a real problem and is not expected to happen anyway. */
4872
4873 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4874 }
4875
4876 /* Functions to save and restore alpha_return_addr_rtx. */
4877
4878 /* Start the ball rolling with RETURN_ADDR_RTX. */
4879
4880 rtx
4881 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4882 {
4883 if (count != 0)
4884 return const0_rtx;
4885
4886 return get_hard_reg_initial_val (Pmode, REG_RA);
4887 }
4888
4889 /* Return or create a memory slot containing the gp value for the current
4890 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4891
4892 rtx
4893 alpha_gp_save_rtx (void)
4894 {
4895 rtx seq, m = cfun->machine->gp_save_rtx;
4896
4897 if (m == NULL)
4898 {
4899 start_sequence ();
4900
4901 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4902 m = validize_mem (m);
4903 emit_move_insn (m, pic_offset_table_rtx);
4904
4905 seq = get_insns ();
4906 end_sequence ();
4907
4908 /* We used to simply emit the sequence after entry_of_function.
4909 However this breaks the CFG if the first instruction in the
4910 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4911 label. Emit the sequence properly on the edge. We are only
4912 invoked from dw2_build_landing_pads and finish_eh_generation
4913 will call commit_edge_insertions thanks to a kludge. */
4914 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4915
4916 cfun->machine->gp_save_rtx = m;
4917 }
4918
4919 return m;
4920 }
4921
4922 static int
4923 alpha_ra_ever_killed (void)
4924 {
4925 rtx top;
4926
4927 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4928 return (int)df_regs_ever_live_p (REG_RA);
4929
4930 push_topmost_sequence ();
4931 top = get_insns ();
4932 pop_topmost_sequence ();
4933
4934 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4935 }
4936
4937 \f
4938 /* Return the trap mode suffix applicable to the current
4939 instruction, or NULL. */
4940
4941 static const char *
4942 get_trap_mode_suffix (void)
4943 {
4944 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4945
4946 switch (s)
4947 {
4948 case TRAP_SUFFIX_NONE:
4949 return NULL;
4950
4951 case TRAP_SUFFIX_SU:
4952 if (alpha_fptm >= ALPHA_FPTM_SU)
4953 return "su";
4954 return NULL;
4955
4956 case TRAP_SUFFIX_SUI:
4957 if (alpha_fptm >= ALPHA_FPTM_SUI)
4958 return "sui";
4959 return NULL;
4960
4961 case TRAP_SUFFIX_V_SV:
4962 switch (alpha_fptm)
4963 {
4964 case ALPHA_FPTM_N:
4965 return NULL;
4966 case ALPHA_FPTM_U:
4967 return "v";
4968 case ALPHA_FPTM_SU:
4969 case ALPHA_FPTM_SUI:
4970 return "sv";
4971 default:
4972 gcc_unreachable ();
4973 }
4974
4975 case TRAP_SUFFIX_V_SV_SVI:
4976 switch (alpha_fptm)
4977 {
4978 case ALPHA_FPTM_N:
4979 return NULL;
4980 case ALPHA_FPTM_U:
4981 return "v";
4982 case ALPHA_FPTM_SU:
4983 return "sv";
4984 case ALPHA_FPTM_SUI:
4985 return "svi";
4986 default:
4987 gcc_unreachable ();
4988 }
4989 break;
4990
4991 case TRAP_SUFFIX_U_SU_SUI:
4992 switch (alpha_fptm)
4993 {
4994 case ALPHA_FPTM_N:
4995 return NULL;
4996 case ALPHA_FPTM_U:
4997 return "u";
4998 case ALPHA_FPTM_SU:
4999 return "su";
5000 case ALPHA_FPTM_SUI:
5001 return "sui";
5002 default:
5003 gcc_unreachable ();
5004 }
5005 break;
5006
5007 default:
5008 gcc_unreachable ();
5009 }
5010 gcc_unreachable ();
5011 }
5012
5013 /* Return the rounding mode suffix applicable to the current
5014 instruction, or NULL. */
5015
5016 static const char *
5017 get_round_mode_suffix (void)
5018 {
5019 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5020
5021 switch (s)
5022 {
5023 case ROUND_SUFFIX_NONE:
5024 return NULL;
5025 case ROUND_SUFFIX_NORMAL:
5026 switch (alpha_fprm)
5027 {
5028 case ALPHA_FPRM_NORM:
5029 return NULL;
5030 case ALPHA_FPRM_MINF:
5031 return "m";
5032 case ALPHA_FPRM_CHOP:
5033 return "c";
5034 case ALPHA_FPRM_DYN:
5035 return "d";
5036 default:
5037 gcc_unreachable ();
5038 }
5039 break;
5040
5041 case ROUND_SUFFIX_C:
5042 return "c";
5043
5044 default:
5045 gcc_unreachable ();
5046 }
5047 gcc_unreachable ();
5048 }
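
/* For example, with -mfp-trap-mode=su and -mfp-rounding-mode=d, an addt is
   printed as "addt/sud" (or "addtsud" without the slash, depending on
   TARGET_AS_SLASH_BEFORE_SUFFIX).  */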
5049
5050 /* Locate some local-dynamic symbol still in use by this function
5051 so that we can print its name in some movdi_er_tlsldm pattern. */
5052
5053 static int
5054 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5055 {
5056 rtx x = *px;
5057
5058 if (GET_CODE (x) == SYMBOL_REF
5059 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5060 {
5061 cfun->machine->some_ld_name = XSTR (x, 0);
5062 return 1;
5063 }
5064
5065 return 0;
5066 }
5067
5068 static const char *
5069 get_some_local_dynamic_name (void)
5070 {
5071 rtx insn;
5072
5073 if (cfun->machine->some_ld_name)
5074 return cfun->machine->some_ld_name;
5075
5076 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5077 if (INSN_P (insn)
5078 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5079 return cfun->machine->some_ld_name;
5080
5081 gcc_unreachable ();
5082 }
5083
5084 /* Print an operand. Recognize special options, documented below. */
5085
5086 void
5087 print_operand (FILE *file, rtx x, int code)
5088 {
5089 int i;
5090
5091 switch (code)
5092 {
5093 case '~':
5094 /* Print the assembler name of the current function. */
5095 assemble_name (file, alpha_fnname);
5096 break;
5097
5098 case '&':
5099 assemble_name (file, get_some_local_dynamic_name ());
5100 break;
5101
5102 case '/':
5103 {
5104 const char *trap = get_trap_mode_suffix ();
5105 const char *round = get_round_mode_suffix ();
5106
5107 if (trap || round)
5108 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5109 (trap ? trap : ""), (round ? round : ""));
5110 break;
5111 }
5112
5113 case ',':
5114 /* Generates single precision instruction suffix. */
5115 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5116 break;
5117
5118 case '-':
5119 /* Generates double precision instruction suffix. */
5120 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5121 break;
5122
5123 case '#':
5124 if (alpha_this_literal_sequence_number == 0)
5125 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5126 fprintf (file, "%d", alpha_this_literal_sequence_number);
5127 break;
5128
5129 case '*':
5130 if (alpha_this_gpdisp_sequence_number == 0)
5131 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5132 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5133 break;
5134
5135 case 'H':
5136 if (GET_CODE (x) == HIGH)
5137 output_addr_const (file, XEXP (x, 0));
5138 else
5139 output_operand_lossage ("invalid %%H value");
5140 break;
5141
5142 case 'J':
5143 {
5144 const char *lituse;
5145
5146 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5147 {
5148 x = XVECEXP (x, 0, 0);
5149 lituse = "lituse_tlsgd";
5150 }
5151 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5152 {
5153 x = XVECEXP (x, 0, 0);
5154 lituse = "lituse_tlsldm";
5155 }
5156 else if (CONST_INT_P (x))
5157 lituse = "lituse_jsr";
5158 else
5159 {
5160 output_operand_lossage ("invalid %%J value");
5161 break;
5162 }
5163
5164 if (x != const0_rtx)
5165 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5166 }
5167 break;
5168
5169 case 'j':
5170 {
5171 const char *lituse;
5172
5173 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5174 lituse = "lituse_jsrdirect";
5175 #else
5176 lituse = "lituse_jsr";
5177 #endif
5178
5179 gcc_assert (INTVAL (x) != 0);
5180 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5181 }
5182 break;
5183 case 'r':
5184 /* If this operand is the constant zero, write it as "$31". */
5185 if (REG_P (x))
5186 fprintf (file, "%s", reg_names[REGNO (x)]);
5187 else if (x == CONST0_RTX (GET_MODE (x)))
5188 fprintf (file, "$31");
5189 else
5190 output_operand_lossage ("invalid %%r value");
5191 break;
5192
5193 case 'R':
5194 /* Similar, but for floating-point. */
5195 if (REG_P (x))
5196 fprintf (file, "%s", reg_names[REGNO (x)]);
5197 else if (x == CONST0_RTX (GET_MODE (x)))
5198 fprintf (file, "$f31");
5199 else
5200 output_operand_lossage ("invalid %%R value");
5201 break;
5202
5203 case 'N':
5204 /* Write the 1's complement of a constant. */
5205 if (!CONST_INT_P (x))
5206 output_operand_lossage ("invalid %%N value");
5207
5208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5209 break;
5210
5211 case 'P':
5212 /* Write 1 << C, for a constant C. */
5213 if (!CONST_INT_P (x))
5214 output_operand_lossage ("invalid %%P value");
5215
5216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5217 break;
5218
5219 case 'h':
5220 /* Write the high-order 16 bits of a constant, sign-extended. */
5221 if (!CONST_INT_P (x))
5222 output_operand_lossage ("invalid %%h value");
5223
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5225 break;
5226
5227 case 'L':
5228 /* Write the low-order 16 bits of a constant, sign-extended. */
5229 if (!CONST_INT_P (x))
5230 output_operand_lossage ("invalid %%L value");
5231
5232 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5233 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5234 break;
5235
5236 case 'm':
5237 /* Write mask for ZAP insn. */
5238 if (GET_CODE (x) == CONST_DOUBLE)
5239 {
5240 HOST_WIDE_INT mask = 0;
5241 HOST_WIDE_INT value;
5242
5243 value = CONST_DOUBLE_LOW (x);
5244 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5245 i++, value >>= 8)
5246 if (value & 0xff)
5247 mask |= (1 << i);
5248
5249 value = CONST_DOUBLE_HIGH (x);
5250 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5251 i++, value >>= 8)
5252 if (value & 0xff)
5253 mask |= (1 << (i + sizeof (int)));
5254
5255 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5256 }
5257
5258 else if (CONST_INT_P (x))
5259 {
5260 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5261
5262 for (i = 0; i < 8; i++, value >>= 8)
5263 if (value & 0xff)
5264 mask |= (1 << i);
5265
5266 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5267 }
5268 else
5269 output_operand_lossage ("invalid %%m value");
5270 break;
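/* Worked example for the %m case above: a CONST_INT of 0x00000000ffff0000
   has nonzero bytes 2 and 3 only, so the printed ZAP mask is
   (1<<2)|(1<<3) == 12.  */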
5271
5272 case 'M':
5273 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5274 if (!CONST_INT_P (x)
5275 || (INTVAL (x) != 8 && INTVAL (x) != 16
5276 && INTVAL (x) != 32 && INTVAL (x) != 64))
5277 output_operand_lossage ("invalid %%M value");
5278
5279 fprintf (file, "%s",
5280 (INTVAL (x) == 8 ? "b"
5281 : INTVAL (x) == 16 ? "w"
5282 : INTVAL (x) == 32 ? "l"
5283 : "q"));
5284 break;
5285
5286 case 'U':
5287 /* Similar, except do it from the mask. */
5288 if (CONST_INT_P (x))
5289 {
5290 HOST_WIDE_INT value = INTVAL (x);
5291
5292 if (value == 0xff)
5293 {
5294 fputc ('b', file);
5295 break;
5296 }
5297 if (value == 0xffff)
5298 {
5299 fputc ('w', file);
5300 break;
5301 }
5302 if (value == 0xffffffff)
5303 {
5304 fputc ('l', file);
5305 break;
5306 }
5307 if (value == -1)
5308 {
5309 fputc ('q', file);
5310 break;
5311 }
5312 }
5313 else if (HOST_BITS_PER_WIDE_INT == 32
5314 && GET_CODE (x) == CONST_DOUBLE
5315 && CONST_DOUBLE_LOW (x) == 0xffffffff
5316 && CONST_DOUBLE_HIGH (x) == 0)
5317 {
5318 fputc ('l', file);
5319 break;
5320 }
5321 output_operand_lossage ("invalid %%U value");
5322 break;
5323
5324 case 's':
5325 /* Write the constant value divided by 8 for little-endian mode or
5326 (56 - value) / 8 for big-endian mode. */
5327
5328 if (!CONST_INT_P (x)
5329 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5330 ? 56
5331 : 64)
5332 || (INTVAL (x) & 7) != 0)
5333 output_operand_lossage ("invalid %%s value");
5334
5335 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5336 WORDS_BIG_ENDIAN
5337 ? (56 - INTVAL (x)) / 8
5338 : INTVAL (x) / 8);
5339 break;
5340
5341 case 'S':
5342 /* Same, except compute (64 - c) / 8. */
5343
5344 if (!CONST_INT_P (x)
5345 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5346 || (INTVAL (x) & 7) != 0)
5347 output_operand_lossage ("invalid %%S value");
5348
5349 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5350 break;
5351
5352 case 't':
5353 {
5354 /* On Unicos/Mk systems: use a DEX expression if the symbol
5355 clashes with a register name. */
5356 int dex = unicosmk_need_dex (x);
5357 if (dex)
5358 fprintf (file, "DEX(%d)", dex);
5359 else
5360 output_addr_const (file, x);
5361 }
5362 break;
5363
5364 case 'C': case 'D': case 'c': case 'd':
5365 /* Write out comparison name. */
5366 {
5367 enum rtx_code c = GET_CODE (x);
5368
5369 if (!COMPARISON_P (x))
5370 output_operand_lossage ("invalid %%C value");
5371
5372 else if (code == 'D')
5373 c = reverse_condition (c);
5374 else if (code == 'c')
5375 c = swap_condition (c);
5376 else if (code == 'd')
5377 c = swap_condition (reverse_condition (c));
5378
5379 if (c == LEU)
5380 fprintf (file, "ule");
5381 else if (c == LTU)
5382 fprintf (file, "ult");
5383 else if (c == UNORDERED)
5384 fprintf (file, "un");
5385 else
5386 fprintf (file, "%s", GET_RTX_NAME (c));
5387 }
5388 break;
5389
5390 case 'E':
5391 /* Write the divide or modulus operator. */
5392 switch (GET_CODE (x))
5393 {
5394 case DIV:
5395 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5396 break;
5397 case UDIV:
5398 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5399 break;
5400 case MOD:
5401 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5402 break;
5403 case UMOD:
5404 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5405 break;
5406 default:
5407 output_operand_lossage ("invalid %%E value");
5408 break;
5409 }
5410 break;
5411
5412 case 'A':
5413 /* Write "_u" for unaligned access. */
5414 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5415 fprintf (file, "_u");
5416 break;
5417
5418 case 0:
5419 if (REG_P (x))
5420 fprintf (file, "%s", reg_names[REGNO (x)]);
5421 else if (MEM_P (x))
5422 output_address (XEXP (x, 0));
5423 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5424 {
5425 switch (XINT (XEXP (x, 0), 1))
5426 {
5427 case UNSPEC_DTPREL:
5428 case UNSPEC_TPREL:
5429 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5430 break;
5431 default:
5432 output_operand_lossage ("unknown relocation unspec");
5433 break;
5434 }
5435 }
5436 else
5437 output_addr_const (file, x);
5438 break;
5439
5440 default:
5441 output_operand_lossage ("invalid %%xn code");
5442 }
5443 }
5444
5445 void
5446 print_operand_address (FILE *file, rtx addr)
5447 {
5448 int basereg = 31;
5449 HOST_WIDE_INT offset = 0;
5450
5451 if (GET_CODE (addr) == AND)
5452 addr = XEXP (addr, 0);
5453
5454 if (GET_CODE (addr) == PLUS
5455 && CONST_INT_P (XEXP (addr, 1)))
5456 {
5457 offset = INTVAL (XEXP (addr, 1));
5458 addr = XEXP (addr, 0);
5459 }
5460
5461 if (GET_CODE (addr) == LO_SUM)
5462 {
5463 const char *reloc16, *reloclo;
5464 rtx op1 = XEXP (addr, 1);
5465
5466 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5467 {
5468 op1 = XEXP (op1, 0);
5469 switch (XINT (op1, 1))
5470 {
5471 case UNSPEC_DTPREL:
5472 reloc16 = NULL;
5473 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5474 break;
5475 case UNSPEC_TPREL:
5476 reloc16 = NULL;
5477 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5478 break;
5479 default:
5480 output_operand_lossage ("unknown relocation unspec");
5481 return;
5482 }
5483
5484 output_addr_const (file, XVECEXP (op1, 0, 0));
5485 }
5486 else
5487 {
5488 reloc16 = "gprel";
5489 reloclo = "gprellow";
5490 output_addr_const (file, op1);
5491 }
5492
5493 if (offset)
5494 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5495
5496 addr = XEXP (addr, 0);
5497 switch (GET_CODE (addr))
5498 {
5499 case REG:
5500 basereg = REGNO (addr);
5501 break;
5502
5503 case SUBREG:
5504 basereg = subreg_regno (addr);
5505 break;
5506
5507 default:
5508 gcc_unreachable ();
5509 }
5510
5511 fprintf (file, "($%d)\t\t!%s", basereg,
5512 (basereg == 29 ? reloc16 : reloclo));
5513 return;
5514 }
5515
5516 switch (GET_CODE (addr))
5517 {
5518 case REG:
5519 basereg = REGNO (addr);
5520 break;
5521
5522 case SUBREG:
5523 basereg = subreg_regno (addr);
5524 break;
5525
5526 case CONST_INT:
5527 offset = INTVAL (addr);
5528 break;
5529
5530 #if TARGET_ABI_OPEN_VMS
5531 case SYMBOL_REF:
5532 fprintf (file, "%s", XSTR (addr, 0));
5533 return;
5534
5535 case CONST:
5536 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5537 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5538 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5539 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5540 INTVAL (XEXP (XEXP (addr, 0), 1)));
5541 return;
5542
5543 #endif
5544 default:
5545 gcc_unreachable ();
5546 }
5547
5548 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5549 }
5550 \f
5551 /* Emit RTL insns to initialize the variable parts of a trampoline at
5552 M_TRAMP. FNDECL is the target function's decl. CHAIN_VALUE is an rtx
5553 for the static chain value for the function. */
5554
5555 static void
5556 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5557 {
5558 rtx fnaddr, mem, word1, word2;
5559
5560 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5561
5562 #ifdef POINTERS_EXTEND_UNSIGNED
5563 fnaddr = convert_memory_address (Pmode, fnaddr);
5564 chain_value = convert_memory_address (Pmode, chain_value);
5565 #endif
5566
5567 if (TARGET_ABI_OPEN_VMS)
5568 {
5569 const char *fnname;
5570 char *trname;
5571
5572 /* Construct the name of the trampoline entry point. */
5573 fnname = XSTR (fnaddr, 0);
5574 trname = (char *) alloca (strlen (fnname) + 5);
5575 strcpy (trname, fnname);
5576 strcat (trname, "..tr");
5577 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5578 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5579
5580 /* Trampoline (or "bounded") procedure descriptor is constructed from
5581 the function's procedure descriptor with certain fields zeroed in accordance
5582 with the VMS calling standard. This is stored in the first quadword. */
5583 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5584 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5585 }
5586 else
5587 {
5588 /* These 4 instructions are:
5589 ldq $1,24($27)
5590 ldq $27,16($27)
5591 jmp $31,($27),0
5592 nop
5593 We don't bother setting the HINT field of the jump; the nop
5594 is merely there for padding. */
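/* The two quadwords below pack those instructions little-endian, low half
   first: word1 is { ldq $1,24($27) = 0xa43b0018, ldq $27,16($27) =
   0xa77b0010 } and word2 is { jmp = 0x6bfb0000, nop = 0x47ff041f }.  */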
5595 word1 = GEN_INT (0xa77b0010a43b0018);
5596 word2 = GEN_INT (0x47ff041f6bfb0000);
5597 }
5598
5599 /* Store the first two words, as computed above. */
5600 mem = adjust_address (m_tramp, DImode, 0);
5601 emit_move_insn (mem, word1);
5602 mem = adjust_address (m_tramp, DImode, 8);
5603 emit_move_insn (mem, word2);
5604
5605 /* Store function address and static chain value. */
5606 mem = adjust_address (m_tramp, Pmode, 16);
5607 emit_move_insn (mem, fnaddr);
5608 mem = adjust_address (m_tramp, Pmode, 24);
5609 emit_move_insn (mem, chain_value);
5610
5611 if (!TARGET_ABI_OPEN_VMS)
5612 {
5613 emit_insn (gen_imb ());
5614 #ifdef ENABLE_EXECUTE_STACK
5615 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5616 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5617 #endif
5618 }
5619 }
5620 \f
5621 /* Determine where to put an argument to a function.
5622 Value is zero to push the argument on the stack,
5623 or a hard register in which to store the argument.
5624
5625 MODE is the argument's machine mode.
5626 TYPE is the data type of the argument (as a tree).
5627 This is null for libcalls where that information may
5628 not be available.
5629 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5630 the preceding args and about the function being called.
5631 NAMED is nonzero if this argument is a named parameter
5632 (otherwise it is an extra parameter matching an ellipsis).
5633
5634 On Alpha the first 6 words of args are normally in registers
5635 and the rest are pushed. */
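/* For example, for a call to "int f (int a, double b, char *c)" under the
   OSF ABI, A is passed in $16, B in $f17 and C in $18.  */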
5636
5637 static rtx
5638 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5639 const_tree type, bool named ATTRIBUTE_UNUSED)
5640 {
5641 int basereg;
5642 int num_args;
5643
5644 /* Don't get confused and pass small structures in FP registers. */
5645 if (type && AGGREGATE_TYPE_P (type))
5646 basereg = 16;
5647 else
5648 {
5649 #ifdef ENABLE_CHECKING
5650 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5651 values here. */
5652 gcc_assert (!COMPLEX_MODE_P (mode));
5653 #endif
5654
5655 /* Set up defaults for FP operands passed in FP registers, and
5656 integral operands passed in integer registers. */
5657 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5658 basereg = 32 + 16;
5659 else
5660 basereg = 16;
5661 }
5662
5663 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5664 the two platforms, so we can't avoid conditional compilation. */
5665 #if TARGET_ABI_OPEN_VMS
5666 {
5667 if (mode == VOIDmode)
5668 return alpha_arg_info_reg_val (cum);
5669
5670 num_args = cum->num_args;
5671 if (num_args >= 6
5672 || targetm.calls.must_pass_in_stack (mode, type))
5673 return NULL_RTX;
5674 }
5675 #elif TARGET_ABI_OSF
5676 {
5677 if (*cum >= 6)
5678 return NULL_RTX;
5679 num_args = *cum;
5680
5681 /* VOID is passed as a special flag for "last argument". */
5682 if (type == void_type_node)
5683 basereg = 16;
5684 else if (targetm.calls.must_pass_in_stack (mode, type))
5685 return NULL_RTX;
5686 }
5687 #else
5688 #error Unhandled ABI
5689 #endif
5690
5691 return gen_rtx_REG (mode, num_args + basereg);
5692 }
5693
5694 /* Update the data in CUM to advance over an argument
5695 of mode MODE and data type TYPE.
5696 (TYPE is null for libcalls where that information may not be available.) */
5697
5698 static void
5699 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5700 const_tree type, bool named)
5701 {
5702 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5703 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5704
5705 #if TARGET_ABI_OSF
5706 *cum += increment;
5707 #else
5708 if (!onstack && cum->num_args < 6)
5709 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5710 cum->num_args += increment;
5711 #endif
5712 }
5713
5714 static int
5715 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5716 enum machine_mode mode ATTRIBUTE_UNUSED,
5717 tree type ATTRIBUTE_UNUSED,
5718 bool named ATTRIBUTE_UNUSED)
5719 {
5720 int words = 0;
5721
5722 #if TARGET_ABI_OPEN_VMS
5723 if (cum->num_args < 6
5724 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5725 words = 6 - cum->num_args;
5726 #elif TARGET_ABI_UNICOSMK
5727 /* Never any split arguments. */
5728 #elif TARGET_ABI_OSF
5729 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5730 words = 6 - *cum;
5731 #else
5732 #error Unhandled ABI
5733 #endif
5734
5735 return words * UNITS_PER_WORD;
5736 }
5737
5738
5739 /* Return true if TYPE must be returned in memory, instead of in registers. */
5740
5741 static bool
5742 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5743 {
5744 enum machine_mode mode = VOIDmode;
5745 int size;
5746
5747 if (type)
5748 {
5749 mode = TYPE_MODE (type);
5750
5751 /* All aggregates are returned in memory, except on OpenVMS where
5752 records that fit in 64 bits should be returned by immediate value
5753 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5754 if (TARGET_ABI_OPEN_VMS
5755 && TREE_CODE (type) != ARRAY_TYPE
5756 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5757 return false;
5758
5759 if (AGGREGATE_TYPE_P (type))
5760 return true;
5761 }
5762
5763 size = GET_MODE_SIZE (mode);
5764 switch (GET_MODE_CLASS (mode))
5765 {
5766 case MODE_VECTOR_FLOAT:
5767 /* Pass all float vectors in memory, like an aggregate. */
5768 return true;
5769
5770 case MODE_COMPLEX_FLOAT:
5771 /* We judge complex floats on the size of their element,
5772 not the size of the whole type. */
5773 size = GET_MODE_UNIT_SIZE (mode);
5774 break;
5775
5776 case MODE_INT:
5777 case MODE_FLOAT:
5778 case MODE_COMPLEX_INT:
5779 case MODE_VECTOR_INT:
5780 break;
5781
5782 default:
5783 /* ??? We get called on all sorts of random stuff from
5784 aggregate_value_p. We must return something, but it's not
5785 clear what's safe to return. Pretend it's a struct I
5786 guess. */
5787 return true;
5788 }
5789
5790 /* Otherwise types must fit in one register. */
5791 return size > UNITS_PER_WORD;
5792 }
5793
5794 /* Return true if TYPE should be passed by invisible reference. */
5795
5796 static bool
5797 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5798 enum machine_mode mode,
5799 const_tree type ATTRIBUTE_UNUSED,
5800 bool named ATTRIBUTE_UNUSED)
5801 {
5802 return mode == TFmode || mode == TCmode;
5803 }
5804
5805 /* Define how to find the value returned by a function. VALTYPE is the
5806 data type of the value (as a tree). If the precise function being
5807 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5808 MODE is set instead of VALTYPE for libcalls.
5809
5810 On Alpha the value is found in $0 for integer functions and
5811 $f0 for floating-point functions. */
5812
5813 rtx
5814 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5815 enum machine_mode mode)
5816 {
5817 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5818 enum mode_class mclass;
5819
5820 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5821
5822 if (valtype)
5823 mode = TYPE_MODE (valtype);
5824
5825 mclass = GET_MODE_CLASS (mode);
5826 switch (mclass)
5827 {
5828 case MODE_INT:
5829 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5830 where we have them returning both SImode and DImode. */
5831 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5832 PROMOTE_MODE (mode, dummy, valtype);
5833 /* FALLTHRU */
5834
5835 case MODE_COMPLEX_INT:
5836 case MODE_VECTOR_INT:
5837 regnum = 0;
5838 break;
5839
5840 case MODE_FLOAT:
5841 regnum = 32;
5842 break;
5843
5844 case MODE_COMPLEX_FLOAT:
5845 {
5846 enum machine_mode cmode = GET_MODE_INNER (mode);
5847
5848 return gen_rtx_PARALLEL
5849 (VOIDmode,
5850 gen_rtvec (2,
5851 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5852 const0_rtx),
5853 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5854 GEN_INT (GET_MODE_SIZE (cmode)))));
5855 }
5856
5857 case MODE_RANDOM:
5858 /* We should only reach here for BLKmode on VMS. */
5859 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5860 regnum = 0;
5861 break;
5862
5863 default:
5864 gcc_unreachable ();
5865 }
5866
5867 return gen_rtx_REG (mode, regnum);
5868 }
5869
5870 /* TCmode complex values are passed by invisible reference. We
5871 should not split these values. */
5872
5873 static bool
5874 alpha_split_complex_arg (const_tree type)
5875 {
5876 return TYPE_MODE (type) != TCmode;
5877 }
5878
5879 static tree
5880 alpha_build_builtin_va_list (void)
5881 {
5882 tree base, ofs, space, record, type_decl;
5883
5884 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5885 return ptr_type_node;
5886
5887 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5888 type_decl = build_decl (BUILTINS_LOCATION,
5889 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5890 TREE_CHAIN (record) = type_decl;
5891 TYPE_NAME (record) = type_decl;
5892
5893 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5894
5895 /* Dummy field to prevent alignment warnings. */
5896 space = build_decl (BUILTINS_LOCATION,
5897 FIELD_DECL, NULL_TREE, integer_type_node);
5898 DECL_FIELD_CONTEXT (space) = record;
5899 DECL_ARTIFICIAL (space) = 1;
5900 DECL_IGNORED_P (space) = 1;
5901
5902 ofs = build_decl (BUILTINS_LOCATION,
5903 FIELD_DECL, get_identifier ("__offset"),
5904 integer_type_node);
5905 DECL_FIELD_CONTEXT (ofs) = record;
5906 DECL_CHAIN (ofs) = space;
5907 /* ??? This is a hack, __offset is marked volatile to prevent
5908 DCE that confuses stdarg optimization and results in
5909 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5910 TREE_THIS_VOLATILE (ofs) = 1;
5911
5912 base = build_decl (BUILTINS_LOCATION,
5913 FIELD_DECL, get_identifier ("__base"),
5914 ptr_type_node);
5915 DECL_FIELD_CONTEXT (base) = record;
5916 DECL_CHAIN (base) = ofs;
5917
5918 TYPE_FIELDS (record) = base;
5919 layout_type (record);
5920
5921 va_list_gpr_counter_field = ofs;
5922 return record;
5923 }
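
/* Roughly the C-level shape of the record built above (a documentation
   sketch, not generated code): __base points at the saved argument words,
   __offset is the byte offset of the next argument, and the trailing
   unnamed int exists only to silence alignment warnings.

	struct __va_list_tag
	{
	  void *__base;
	  volatile int __offset;
	  int unnamed_dummy;
	};
*/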
5924
5925 #if TARGET_ABI_OSF
5926 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5927 and constant additions. */
5928
5929 static gimple
5930 va_list_skip_additions (tree lhs)
5931 {
5932 gimple stmt;
5933
5934 for (;;)
5935 {
5936 enum tree_code code;
5937
5938 stmt = SSA_NAME_DEF_STMT (lhs);
5939
5940 if (gimple_code (stmt) == GIMPLE_PHI)
5941 return stmt;
5942
5943 if (!is_gimple_assign (stmt)
5944 || gimple_assign_lhs (stmt) != lhs)
5945 return NULL;
5946
5947 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5948 return stmt;
5949 code = gimple_assign_rhs_code (stmt);
5950 if (!CONVERT_EXPR_CODE_P (code)
5951 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5952 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5953 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5954 return stmt;
5955
5956 lhs = gimple_assign_rhs1 (stmt);
5957 }
5958 }
5959
5960 /* Check if LHS = RHS statement is
5961 LHS = *(ap.__base + ap.__offset + cst)
5962 or
5963 LHS = *(ap.__base
5964 + ((ap.__offset + cst <= 47)
5965 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5966 If the former, indicate that GPR registers are needed,
5967 if the latter, indicate that FPR registers are needed.
5968
5969 Also look for LHS = (*ptr).field, where ptr is one of the forms
5970 listed above.
5971
5972 On alpha, cfun->va_list_gpr_size is used as size of the needed
5973 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5974 registers are needed and bit 1 set if FPR registers are needed.
5975 Return true if va_list references should not be scanned for the
5976 current statement. */
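/* For illustration (hypothetical user code, not part of GCC), both shapes
   described above arise from ordinary va_arg uses such as:

       #include <stdarg.h>

       double f (int n, ...)
       {
         va_list ap;
         va_start (ap, n);
         int i = va_arg (ap, int);        // GPR form: *(__base + __offset + cst)
         double d = va_arg (ap, double);  // FPR form: the "<= 47 ? ... - 48" test
         va_end (ap);
         return d + i;
       }

   The integer va_arg only needs the saved GPRs; the double goes through the
   48-byte adjustment and therefore needs the saved FPRs as well.  */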
5977
5978 static bool
5979 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5980 {
5981 tree base, offset, rhs;
5982 int offset_arg = 1;
5983 gimple base_stmt;
5984
5985 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5986 != GIMPLE_SINGLE_RHS)
5987 return false;
5988
5989 rhs = gimple_assign_rhs1 (stmt);
5990 while (handled_component_p (rhs))
5991 rhs = TREE_OPERAND (rhs, 0);
5992 if (TREE_CODE (rhs) != MEM_REF
5993 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5994 return false;
5995
5996 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5997 if (stmt == NULL
5998 || !is_gimple_assign (stmt)
5999 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6000 return false;
6001
6002 base = gimple_assign_rhs1 (stmt);
6003 if (TREE_CODE (base) == SSA_NAME)
6004 {
6005 base_stmt = va_list_skip_additions (base);
6006 if (base_stmt
6007 && is_gimple_assign (base_stmt)
6008 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6009 base = gimple_assign_rhs1 (base_stmt);
6010 }
6011
6012 if (TREE_CODE (base) != COMPONENT_REF
6013 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6014 {
6015 base = gimple_assign_rhs2 (stmt);
6016 if (TREE_CODE (base) == SSA_NAME)
6017 {
6018 base_stmt = va_list_skip_additions (base);
6019 if (base_stmt
6020 && is_gimple_assign (base_stmt)
6021 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6022 base = gimple_assign_rhs1 (base_stmt);
6023 }
6024
6025 if (TREE_CODE (base) != COMPONENT_REF
6026 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6027 return false;
6028
6029 offset_arg = 0;
6030 }
6031
6032 base = get_base_address (base);
6033 if (TREE_CODE (base) != VAR_DECL
6034 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
6035 return false;
6036
6037 offset = gimple_op (stmt, 1 + offset_arg);
6038 if (TREE_CODE (offset) == SSA_NAME)
6039 {
6040 gimple offset_stmt = va_list_skip_additions (offset);
6041
6042 if (offset_stmt
6043 && gimple_code (offset_stmt) == GIMPLE_PHI)
6044 {
6045 HOST_WIDE_INT sub;
6046 gimple arg1_stmt, arg2_stmt;
6047 tree arg1, arg2;
6048 enum tree_code code1, code2;
6049
6050 if (gimple_phi_num_args (offset_stmt) != 2)
6051 goto escapes;
6052
6053 arg1_stmt
6054 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6055 arg2_stmt
6056 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6057 if (arg1_stmt == NULL
6058 || !is_gimple_assign (arg1_stmt)
6059 || arg2_stmt == NULL
6060 || !is_gimple_assign (arg2_stmt))
6061 goto escapes;
6062
6063 code1 = gimple_assign_rhs_code (arg1_stmt);
6064 code2 = gimple_assign_rhs_code (arg2_stmt);
6065 if (code1 == COMPONENT_REF
6066 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6067 /* Do nothing. */;
6068 else if (code2 == COMPONENT_REF
6069 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6070 {
6071 gimple tem = arg1_stmt;
6072 code2 = code1;
6073 arg1_stmt = arg2_stmt;
6074 arg2_stmt = tem;
6075 }
6076 else
6077 goto escapes;
6078
6079 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6080 goto escapes;
6081
6082 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6083 if (code2 == MINUS_EXPR)
6084 sub = -sub;
6085 if (sub < -48 || sub > -32)
6086 goto escapes;
6087
6088 arg1 = gimple_assign_rhs1 (arg1_stmt);
6089 arg2 = gimple_assign_rhs1 (arg2_stmt);
6090 if (TREE_CODE (arg2) == SSA_NAME)
6091 {
6092 arg2_stmt = va_list_skip_additions (arg2);
6093 if (arg2_stmt == NULL
6094 || !is_gimple_assign (arg2_stmt)
6095 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6096 goto escapes;
6097 arg2 = gimple_assign_rhs1 (arg2_stmt);
6098 }
6099 if (arg1 != arg2)
6100 goto escapes;
6101
6102 if (TREE_CODE (arg1) != COMPONENT_REF
6103 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6104 || get_base_address (arg1) != base)
6105 goto escapes;
6106
6107 /* Need floating point regs. */
6108 cfun->va_list_fpr_size |= 2;
6109 return false;
6110 }
6111 if (offset_stmt
6112 && is_gimple_assign (offset_stmt)
6113 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6114 offset = gimple_assign_rhs1 (offset_stmt);
6115 }
6116 if (TREE_CODE (offset) != COMPONENT_REF
6117 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6118 || get_base_address (offset) != base)
6119 goto escapes;
6120 else
6121 /* Need general regs. */
6122 cfun->va_list_fpr_size |= 1;
6123 return false;
6124
6125 escapes:
6126 si->va_list_escapes = true;
6127 return false;
6128 }
6129 #endif
6130
6131 /* Perform any actions needed for a function that is receiving a
6132 variable number of arguments. */
6133
6134 static void
6135 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6136 tree type, int *pretend_size, int no_rtl)
6137 {
6138 CUMULATIVE_ARGS cum = *pcum;
6139
6140 /* Skip the current argument. */
6141 targetm.calls.function_arg_advance (&cum, mode, type, true);
6142
6143 #if TARGET_ABI_UNICOSMK
6144 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6145 arguments on the stack. Unfortunately, it doesn't always store the first
6146 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6147 with stdargs as we always have at least one named argument there. */
6148 if (cum.num_reg_words < 6)
6149 {
6150 if (!no_rtl)
6151 {
6152 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6153 emit_insn (gen_arg_home_umk ());
6154 }
6155 *pretend_size = 0;
6156 }
6157 #elif TARGET_ABI_OPEN_VMS
6158 /* For VMS, we allocate space for all 6 arg registers plus a count.
6159
6160 However, if NO registers need to be saved, don't allocate any space.
6161 This is not only because we won't need the space, but because AP
6162 includes the current_pretend_args_size and we don't want to mess up
6163 any ap-relative addresses already made. */
6164 if (cum.num_args < 6)
6165 {
6166 if (!no_rtl)
6167 {
6168 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6169 emit_insn (gen_arg_home ());
6170 }
6171 *pretend_size = 7 * UNITS_PER_WORD;
6172 }
6173 #else
6174 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6175 only push those that are remaining. However, if NO registers need to
6176 be saved, don't allocate any space. This is not only because we won't
6177 need the space, but because AP includes the current_pretend_args_size
6178 and we don't want to mess up any ap-relative addresses already made.
6179
6180 If we are not to use the floating-point registers, save the integer
6181 registers where we would put the floating-point registers. This is
6182 not the most efficient way to implement varargs with just one register
6183 class, but it isn't worth doing anything more efficient in this rare
6184 case. */
6185 if (cum >= 6)
6186 return;
6187
6188 if (!no_rtl)
6189 {
6190 int count;
6191 alias_set_type set = get_varargs_alias_set ();
6192 rtx tmp;
6193
6194 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6195 if (count > 6 - cum)
6196 count = 6 - cum;
6197
6198 /* Detect whether integer registers or floating-point registers
6199 are needed by the detected va_arg statements. See above for
6200 how these values are computed. Note that the "escape" value
6201 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6202 these bits set. */
6203 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6204
6205 if (cfun->va_list_fpr_size & 1)
6206 {
6207 tmp = gen_rtx_MEM (BLKmode,
6208 plus_constant (virtual_incoming_args_rtx,
6209 (cum + 6) * UNITS_PER_WORD));
6210 MEM_NOTRAP_P (tmp) = 1;
6211 set_mem_alias_set (tmp, set);
6212 move_block_from_reg (16 + cum, tmp, count);
6213 }
6214
6215 if (cfun->va_list_fpr_size & 2)
6216 {
6217 tmp = gen_rtx_MEM (BLKmode,
6218 plus_constant (virtual_incoming_args_rtx,
6219 cum * UNITS_PER_WORD));
6220 MEM_NOTRAP_P (tmp) = 1;
6221 set_mem_alias_set (tmp, set);
6222 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6223 }
6224 }
6225 *pretend_size = 12 * UNITS_PER_WORD;
6226 #endif
6227 }
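/* Worked example for the OSF code above (illustrative numbers): suppose
   CUM == 2 once the named arguments have been skipped and the va_arg scan
   requested the full save area, so COUNT == 4.  Then:

     - if bit 0 of cfun->va_list_fpr_size is set (GPRs needed), $18-$21 are
       stored at offsets (2 + 6) * 8 == 64 ... 88 from virtual_incoming_args;
     - if bit 1 is set (FPRs needed), $f18-$f21 are stored at offsets
       2 * 8 == 16 ... 40;
     - *pretend_size is always 12 * 8 == 96 bytes.

   Note the field carrying the bitmask is va_list_fpr_size even though bit 0
   refers to the integer registers (see alpha_stdarg_optimize_hook above).  */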
6228
6229 static void
6230 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6231 {
6232 HOST_WIDE_INT offset;
6233 tree t, offset_field, base_field;
6234
6235 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6236 return;
6237
6238 if (TARGET_ABI_UNICOSMK)
6239 std_expand_builtin_va_start (valist, nextarg);
6240
6241 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6242 up by 48, storing fp arg registers in the first 48 bytes, and the
6243 integer arg registers in the next 48 bytes. This is only done,
6244 however, if any integer registers need to be stored.
6245
6246 If no integer registers need be stored, then we must subtract 48
6247 in order to account for the integer arg registers which are counted
6248 in argsize above, but which are not actually stored on the stack.
6249 Must further be careful here about structures straddling the last
6250 integer argument register; that futzes with pretend_args_size,
6251 which changes the meaning of AP. */
6252
6253 if (NUM_ARGS < 6)
6254 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6255 else
6256 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6257
6258 if (TARGET_ABI_OPEN_VMS)
6259 {
6260 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6261 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6262 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6263 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6264 TREE_SIDE_EFFECTS (t) = 1;
6265 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6266 }
6267 else
6268 {
6269 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6270 offset_field = DECL_CHAIN (base_field);
6271
6272 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6273 valist, base_field, NULL_TREE);
6274 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6275 valist, offset_field, NULL_TREE);
6276
6277 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6278 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6279 size_int (offset));
6280 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6281 TREE_SIDE_EFFECTS (t) = 1;
6282 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6283
6284 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6285 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6286 TREE_SIDE_EFFECTS (t) = 1;
6287 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6288 }
6289 }
6290
6291 static tree
6292 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6293 gimple_seq *pre_p)
6294 {
6295 tree type_size, ptr_type, addend, t, addr;
6296 gimple_seq internal_post;
6297
6298 /* If the type could not be passed in registers, skip the block
6299 reserved for the registers. */
6300 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6301 {
6302 t = build_int_cst (TREE_TYPE (offset), 6*8);
6303 gimplify_assign (offset,
6304 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6305 pre_p);
6306 }
6307
6308 addend = offset;
6309 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6310
6311 if (TREE_CODE (type) == COMPLEX_TYPE)
6312 {
6313 tree real_part, imag_part, real_temp;
6314
6315 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6316 offset, pre_p);
6317
6318 /* Copy the value into a new temporary, lest the formal temporary
6319 be reused out from under us. */
6320 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6321
6322 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6323 offset, pre_p);
6324
6325 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6326 }
6327 else if (TREE_CODE (type) == REAL_TYPE)
6328 {
6329 tree fpaddend, cond, fourtyeight;
6330
6331 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6332 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6333 addend, fourtyeight);
6334 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6335 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6336 fpaddend, addend);
6337 }
6338
6339 /* Build the final address and force that value into a temporary. */
6340 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6341 fold_convert (sizetype, addend));
6342 internal_post = NULL;
6343 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6344 gimple_seq_add_seq (pre_p, internal_post);
6345
6346 /* Update the offset field. */
6347 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6348 if (type_size == NULL || TREE_OVERFLOW (type_size))
6349 t = size_zero_node;
6350 else
6351 {
6352 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6353 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6354 t = size_binop (MULT_EXPR, t, size_int (8));
6355 }
6356 t = fold_convert (TREE_TYPE (offset), t);
6357 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6358 pre_p);
6359
6360 return build_va_arg_indirect_ref (addr);
6361 }
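/* Worked example of the __offset update above (illustration only): the
   argument size is rounded up to a whole number of 8-byte words,
   t = ((size + 7) / 8) * 8, so a double advances __offset by 8 and a
   12-byte struct by 16.  Types that must be passed on the stack first bump
   __offset to at least 6*8 == 48 so the register save area is skipped
   entirely.  */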
6362
6363 static tree
6364 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6365 gimple_seq *post_p)
6366 {
6367 tree offset_field, base_field, offset, base, t, r;
6368 bool indirect;
6369
6370 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6371 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6372
6373 base_field = TYPE_FIELDS (va_list_type_node);
6374 offset_field = DECL_CHAIN (base_field);
6375 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6376 valist, base_field, NULL_TREE);
6377 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6378 valist, offset_field, NULL_TREE);
6379
6380 /* Pull the fields of the structure out into temporaries. Since we never
6381 modify the base field, we can use a formal temporary. Sign-extend the
6382 offset field so that it's the proper width for pointer arithmetic. */
6383 base = get_formal_tmp_var (base_field, pre_p);
6384
6385 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6386 offset = get_initialized_tmp_var (t, pre_p, NULL);
6387
6388 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6389 if (indirect)
6390 type = build_pointer_type_for_mode (type, ptr_mode, true);
6391
6392 /* Find the value. Note that this will be a stable indirection, or
6393 a composite of stable indirections in the case of complex. */
6394 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6395
6396 /* Stuff the offset temporary back into its field. */
6397 gimplify_assign (unshare_expr (offset_field),
6398 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6399
6400 if (indirect)
6401 r = build_va_arg_indirect_ref (r);
6402
6403 return r;
6404 }
6405 \f
6406 /* Builtins. */
6407
6408 enum alpha_builtin
6409 {
6410 ALPHA_BUILTIN_CMPBGE,
6411 ALPHA_BUILTIN_EXTBL,
6412 ALPHA_BUILTIN_EXTWL,
6413 ALPHA_BUILTIN_EXTLL,
6414 ALPHA_BUILTIN_EXTQL,
6415 ALPHA_BUILTIN_EXTWH,
6416 ALPHA_BUILTIN_EXTLH,
6417 ALPHA_BUILTIN_EXTQH,
6418 ALPHA_BUILTIN_INSBL,
6419 ALPHA_BUILTIN_INSWL,
6420 ALPHA_BUILTIN_INSLL,
6421 ALPHA_BUILTIN_INSQL,
6422 ALPHA_BUILTIN_INSWH,
6423 ALPHA_BUILTIN_INSLH,
6424 ALPHA_BUILTIN_INSQH,
6425 ALPHA_BUILTIN_MSKBL,
6426 ALPHA_BUILTIN_MSKWL,
6427 ALPHA_BUILTIN_MSKLL,
6428 ALPHA_BUILTIN_MSKQL,
6429 ALPHA_BUILTIN_MSKWH,
6430 ALPHA_BUILTIN_MSKLH,
6431 ALPHA_BUILTIN_MSKQH,
6432 ALPHA_BUILTIN_UMULH,
6433 ALPHA_BUILTIN_ZAP,
6434 ALPHA_BUILTIN_ZAPNOT,
6435 ALPHA_BUILTIN_AMASK,
6436 ALPHA_BUILTIN_IMPLVER,
6437 ALPHA_BUILTIN_RPCC,
6438 ALPHA_BUILTIN_THREAD_POINTER,
6439 ALPHA_BUILTIN_SET_THREAD_POINTER,
6440 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6441 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6442
6443 /* TARGET_MAX */
6444 ALPHA_BUILTIN_MINUB8,
6445 ALPHA_BUILTIN_MINSB8,
6446 ALPHA_BUILTIN_MINUW4,
6447 ALPHA_BUILTIN_MINSW4,
6448 ALPHA_BUILTIN_MAXUB8,
6449 ALPHA_BUILTIN_MAXSB8,
6450 ALPHA_BUILTIN_MAXUW4,
6451 ALPHA_BUILTIN_MAXSW4,
6452 ALPHA_BUILTIN_PERR,
6453 ALPHA_BUILTIN_PKLB,
6454 ALPHA_BUILTIN_PKWB,
6455 ALPHA_BUILTIN_UNPKBL,
6456 ALPHA_BUILTIN_UNPKBW,
6457
6458 /* TARGET_CIX */
6459 ALPHA_BUILTIN_CTTZ,
6460 ALPHA_BUILTIN_CTLZ,
6461 ALPHA_BUILTIN_CTPOP,
6462
6463 ALPHA_BUILTIN_max
6464 };
6465
6466 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6467 CODE_FOR_builtin_cmpbge,
6468 CODE_FOR_builtin_extbl,
6469 CODE_FOR_builtin_extwl,
6470 CODE_FOR_builtin_extll,
6471 CODE_FOR_builtin_extql,
6472 CODE_FOR_builtin_extwh,
6473 CODE_FOR_builtin_extlh,
6474 CODE_FOR_builtin_extqh,
6475 CODE_FOR_builtin_insbl,
6476 CODE_FOR_builtin_inswl,
6477 CODE_FOR_builtin_insll,
6478 CODE_FOR_builtin_insql,
6479 CODE_FOR_builtin_inswh,
6480 CODE_FOR_builtin_inslh,
6481 CODE_FOR_builtin_insqh,
6482 CODE_FOR_builtin_mskbl,
6483 CODE_FOR_builtin_mskwl,
6484 CODE_FOR_builtin_mskll,
6485 CODE_FOR_builtin_mskql,
6486 CODE_FOR_builtin_mskwh,
6487 CODE_FOR_builtin_msklh,
6488 CODE_FOR_builtin_mskqh,
6489 CODE_FOR_umuldi3_highpart,
6490 CODE_FOR_builtin_zap,
6491 CODE_FOR_builtin_zapnot,
6492 CODE_FOR_builtin_amask,
6493 CODE_FOR_builtin_implver,
6494 CODE_FOR_builtin_rpcc,
6495 CODE_FOR_load_tp,
6496 CODE_FOR_set_tp,
6497 CODE_FOR_builtin_establish_vms_condition_handler,
6498 CODE_FOR_builtin_revert_vms_condition_handler,
6499
6500 /* TARGET_MAX */
6501 CODE_FOR_builtin_minub8,
6502 CODE_FOR_builtin_minsb8,
6503 CODE_FOR_builtin_minuw4,
6504 CODE_FOR_builtin_minsw4,
6505 CODE_FOR_builtin_maxub8,
6506 CODE_FOR_builtin_maxsb8,
6507 CODE_FOR_builtin_maxuw4,
6508 CODE_FOR_builtin_maxsw4,
6509 CODE_FOR_builtin_perr,
6510 CODE_FOR_builtin_pklb,
6511 CODE_FOR_builtin_pkwb,
6512 CODE_FOR_builtin_unpkbl,
6513 CODE_FOR_builtin_unpkbw,
6514
6515 /* TARGET_CIX */
6516 CODE_FOR_ctzdi2,
6517 CODE_FOR_clzdi2,
6518 CODE_FOR_popcountdi2
6519 };
6520
6521 struct alpha_builtin_def
6522 {
6523 const char *name;
6524 enum alpha_builtin code;
6525 unsigned int target_mask;
6526 bool is_const;
6527 };
6528
6529 static struct alpha_builtin_def const zero_arg_builtins[] = {
6530 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6531 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6532 };
6533
6534 static struct alpha_builtin_def const one_arg_builtins[] = {
6535 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6536 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6537 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6538 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6539 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6540 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6541 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6542 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6543 };
6544
6545 static struct alpha_builtin_def const two_arg_builtins[] = {
6546 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6547 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6548 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6549 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6550 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6551 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6552 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6553 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6554 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6555 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6556 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6557 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6558 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6559 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6560 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6561 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6562 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6563 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6564 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6565 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6566 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6567 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6568 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6569 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6570 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6571 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6572 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6573 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6574 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6575 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6576 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6577 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6578 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6579 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6580 };
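/* Illustrative user-level usage of a couple of these builtins (the values
   are an editor-chosen example; the semantics follow the folders below):

       long v  = 0x1122334455667788L;
       long lo = __builtin_alpha_zapnot (v, 0x0f);  // 0x0000000055667788
       long ge = __builtin_alpha_cmpbge (v, v);     // 0xff: every byte >= itself

   zapnot keeps the bytes whose mask bit is set; cmpbge produces one result
   bit per byte-wise unsigned >= comparison.  */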
6581
6582 static GTY(()) tree alpha_v8qi_u;
6583 static GTY(()) tree alpha_v8qi_s;
6584 static GTY(()) tree alpha_v4hi_u;
6585 static GTY(()) tree alpha_v4hi_s;
6586
6587 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6588
6589 /* Return the alpha builtin for CODE. */
6590
6591 static tree
6592 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6593 {
6594 if (code >= ALPHA_BUILTIN_max)
6595 return error_mark_node;
6596 return alpha_builtins[code];
6597 }
6598
6599 /* Helper function of alpha_init_builtins. Add the built-in specified
6600 by NAME, FTYPE, CODE, and ECF. */
6601
6602 static void
6603 alpha_builtin_function (const char *name, tree ftype,
6604 enum alpha_builtin code, unsigned ecf)
6605 {
6606 tree decl = add_builtin_function (name, ftype, (int) code,
6607 BUILT_IN_MD, NULL, NULL_TREE);
6608
6609 if (ecf & ECF_CONST)
6610 TREE_READONLY (decl) = 1;
6611 if (ecf & ECF_NOTHROW)
6612 TREE_NOTHROW (decl) = 1;
6613
6614 alpha_builtins [(int) code] = decl;
6615 }
6616
6617 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6618 functions pointed to by P, with function type FTYPE. */
6619
6620 static void
6621 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6622 tree ftype)
6623 {
6624 size_t i;
6625
6626 for (i = 0; i < count; ++i, ++p)
6627 if ((target_flags & p->target_mask) == p->target_mask)
6628 alpha_builtin_function (p->name, ftype, p->code,
6629 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6630 }
6631
6632 static void
6633 alpha_init_builtins (void)
6634 {
6635 tree dimode_integer_type_node;
6636 tree ftype;
6637
6638 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6639
6640 /* Fwrite on VMS is non-standard. */
6641 #if TARGET_ABI_OPEN_VMS
6642 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6643 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6644 #endif
6645
6646 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6647 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6648 ftype);
6649
6650 ftype = build_function_type_list (dimode_integer_type_node,
6651 dimode_integer_type_node, NULL_TREE);
6652 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6653 ftype);
6654
6655 ftype = build_function_type_list (dimode_integer_type_node,
6656 dimode_integer_type_node,
6657 dimode_integer_type_node, NULL_TREE);
6658 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6659 ftype);
6660
6661 ftype = build_function_type (ptr_type_node, void_list_node);
6662 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6663 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6664
6665 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6666 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6667 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6668
6669 if (TARGET_ABI_OPEN_VMS)
6670 {
6671 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6672 NULL_TREE);
6673 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6674 ftype,
6675 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6676 0);
6677
6678 ftype = build_function_type_list (ptr_type_node, void_type_node,
6679 NULL_TREE);
6680 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6681 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6682 }
6683
6684 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6685 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6686 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6687 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6688 }
6689
6690 /* Expand an expression EXP that calls a built-in function,
6691 with result going to TARGET if that's convenient
6692 (and in mode MODE if that's convenient).
6693 SUBTARGET may be used as the target for computing one of EXP's operands.
6694 IGNORE is nonzero if the value is to be ignored. */
6695
6696 static rtx
6697 alpha_expand_builtin (tree exp, rtx target,
6698 rtx subtarget ATTRIBUTE_UNUSED,
6699 enum machine_mode mode ATTRIBUTE_UNUSED,
6700 int ignore ATTRIBUTE_UNUSED)
6701 {
6702 #define MAX_ARGS 2
6703
6704 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6705 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6706 tree arg;
6707 call_expr_arg_iterator iter;
6708 enum insn_code icode;
6709 rtx op[MAX_ARGS], pat;
6710 int arity;
6711 bool nonvoid;
6712
6713 if (fcode >= ALPHA_BUILTIN_max)
6714 internal_error ("bad builtin fcode");
6715 icode = code_for_builtin[fcode];
6716 if (icode == 0)
6717 internal_error ("bad builtin fcode");
6718
6719 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6720
6721 arity = 0;
6722 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6723 {
6724 const struct insn_operand_data *insn_op;
6725
6726 if (arg == error_mark_node)
6727 return NULL_RTX;
6728 if (arity > MAX_ARGS)
6729 return NULL_RTX;
6730
6731 insn_op = &insn_data[icode].operand[arity + nonvoid];
6732
6733 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6734
6735 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6736 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6737 arity++;
6738 }
6739
6740 if (nonvoid)
6741 {
6742 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6743 if (!target
6744 || GET_MODE (target) != tmode
6745 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6746 target = gen_reg_rtx (tmode);
6747 }
6748
6749 switch (arity)
6750 {
6751 case 0:
6752 pat = GEN_FCN (icode) (target);
6753 break;
6754 case 1:
6755 if (nonvoid)
6756 pat = GEN_FCN (icode) (target, op[0]);
6757 else
6758 pat = GEN_FCN (icode) (op[0]);
6759 break;
6760 case 2:
6761 pat = GEN_FCN (icode) (target, op[0], op[1]);
6762 break;
6763 default:
6764 gcc_unreachable ();
6765 }
6766 if (!pat)
6767 return NULL_RTX;
6768 emit_insn (pat);
6769
6770 if (nonvoid)
6771 return target;
6772 else
6773 return const0_rtx;
6774 }
6775
6776
6777 /* Several bits below assume HWI >= 64 bits. This should be enforced
6778 by config.gcc. */
6779 #if HOST_BITS_PER_WIDE_INT < 64
6780 # error "HOST_WIDE_INT too small"
6781 #endif
6782
6783 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6784 with an 8-bit output vector. OPINT contains the integer operands; bit N
6785 of OP_CONST is set if OPINT[N] is valid. */
6786
6787 static tree
6788 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6789 {
6790 if (op_const == 3)
6791 {
6792 int i, val;
6793 for (i = 0, val = 0; i < 8; ++i)
6794 {
6795 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6796 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6797 if (c0 >= c1)
6798 val |= 1 << i;
6799 }
6800 return build_int_cst (long_integer_type_node, val);
6801 }
6802 else if (op_const == 2 && opint[1] == 0)
6803 return build_int_cst (long_integer_type_node, 0xff);
6804 return NULL;
6805 }
6806
6807 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6808 specialized form of an AND operation. Other byte manipulation instructions
6809 are defined in terms of this instruction, so this is also used as a
6810 subroutine for other builtins.
6811
6812 OP contains the tree operands; OPINT contains the extracted integer values.
6813 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6814 OPINT may be considered. */
6815
6816 static tree
6817 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6818 long op_const)
6819 {
6820 if (op_const & 2)
6821 {
6822 unsigned HOST_WIDE_INT mask = 0;
6823 int i;
6824
6825 for (i = 0; i < 8; ++i)
6826 if ((opint[1] >> i) & 1)
6827 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6828
6829 if (op_const & 1)
6830 return build_int_cst (long_integer_type_node, opint[0] & mask);
6831
6832 if (op)
6833 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6834 build_int_cst (long_integer_type_node, mask));
6835 }
6836 else if ((op_const & 1) && opint[0] == 0)
6837 return build_int_cst (long_integer_type_node, 0);
6838 return NULL;
6839 }
6840
6841 /* Fold the builtins for the EXT family of instructions. */
6842
6843 static tree
6844 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6845 long op_const, unsigned HOST_WIDE_INT bytemask,
6846 bool is_high)
6847 {
6848 long zap_const = 2;
6849 tree *zap_op = NULL;
6850
6851 if (op_const & 2)
6852 {
6853 unsigned HOST_WIDE_INT loc;
6854
6855 loc = opint[1] & 7;
6856 if (BYTES_BIG_ENDIAN)
6857 loc ^= 7;
6858 loc *= 8;
6859
6860 if (loc != 0)
6861 {
6862 if (op_const & 1)
6863 {
6864 unsigned HOST_WIDE_INT temp = opint[0];
6865 if (is_high)
6866 temp <<= loc;
6867 else
6868 temp >>= loc;
6869 opint[0] = temp;
6870 zap_const = 3;
6871 }
6872 }
6873 else
6874 zap_op = op;
6875 }
6876
6877 opint[1] = bytemask;
6878 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6879 }
6880
6881 /* Fold the builtins for the INS family of instructions. */
6882
6883 static tree
6884 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6885 long op_const, unsigned HOST_WIDE_INT bytemask,
6886 bool is_high)
6887 {
6888 if ((op_const & 1) && opint[0] == 0)
6889 return build_int_cst (long_integer_type_node, 0);
6890
6891 if (op_const & 2)
6892 {
6893 unsigned HOST_WIDE_INT temp, loc, byteloc;
6894 tree *zap_op = NULL;
6895
6896 loc = opint[1] & 7;
6897 if (BYTES_BIG_ENDIAN)
6898 loc ^= 7;
6899 bytemask <<= loc;
6900
6901 temp = opint[0];
6902 if (is_high)
6903 {
6904 byteloc = (64 - (loc * 8)) & 0x3f;
6905 if (byteloc == 0)
6906 zap_op = op;
6907 else
6908 temp >>= byteloc;
6909 bytemask >>= 8;
6910 }
6911 else
6912 {
6913 byteloc = loc * 8;
6914 if (byteloc == 0)
6915 zap_op = op;
6916 else
6917 temp <<= byteloc;
6918 }
6919
6920 opint[0] = temp;
6921 opint[1] = bytemask;
6922 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6923 }
6924
6925 return NULL;
6926 }
6927
6928 static tree
6929 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6930 long op_const, unsigned HOST_WIDE_INT bytemask,
6931 bool is_high)
6932 {
6933 if (op_const & 2)
6934 {
6935 unsigned HOST_WIDE_INT loc;
6936
6937 loc = opint[1] & 7;
6938 if (BYTES_BIG_ENDIAN)
6939 loc ^= 7;
6940 bytemask <<= loc;
6941
6942 if (is_high)
6943 bytemask >>= 8;
6944
6945 opint[1] = bytemask ^ 0xff;
6946 }
6947
6948 return alpha_fold_builtin_zapnot (op, opint, op_const);
6949 }
6950
6951 static tree
6952 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6953 {
6954 switch (op_const)
6955 {
6956 case 3:
6957 {
6958 unsigned HOST_WIDE_INT l;
6959 HOST_WIDE_INT h;
6960
6961 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6962
6963 #if HOST_BITS_PER_WIDE_INT > 64
6964 # error fixme
6965 #endif
6966
6967 return build_int_cst (long_integer_type_node, h);
6968 }
6969
6970 case 1:
6971 opint[1] = opint[0];
6972 /* FALLTHRU */
6973 case 2:
6974 /* Note that (X*1) >> 64 == 0. */
6975 if (opint[1] == 0 || opint[1] == 1)
6976 return build_int_cst (long_integer_type_node, 0);
6977 break;
6978 }
6979 return NULL;
6980 }
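/* Example of the folding above: __builtin_alpha_umulh returns the high 64
   bits of the unsigned 128-bit product, so umulh (1L << 32, 1L << 32) == 1,
   and umulh (x, 1) == 0 for any x, as the case 1/2 code exploits.  */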
6981
6982 static tree
6983 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6984 {
6985 tree op0 = fold_convert (vtype, op[0]);
6986 tree op1 = fold_convert (vtype, op[1]);
6987 tree val = fold_build2 (code, vtype, op0, op1);
6988 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6989 }
6990
6991 static tree
6992 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6993 {
6994 unsigned HOST_WIDE_INT temp = 0;
6995 int i;
6996
6997 if (op_const != 3)
6998 return NULL;
6999
7000 for (i = 0; i < 8; ++i)
7001 {
7002 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
7003 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
7004 if (a >= b)
7005 temp += a - b;
7006 else
7007 temp += b - a;
7008 }
7009
7010 return build_int_cst (long_integer_type_node, temp);
7011 }
7012
7013 static tree
7014 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
7015 {
7016 unsigned HOST_WIDE_INT temp;
7017
7018 if (op_const == 0)
7019 return NULL;
7020
7021 temp = opint[0] & 0xff;
7022 temp |= (opint[0] >> 24) & 0xff00;
7023
7024 return build_int_cst (long_integer_type_node, temp);
7025 }
7026
7027 static tree
7028 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7029 {
7030 unsigned HOST_WIDE_INT temp;
7031
7032 if (op_const == 0)
7033 return NULL;
7034
7035 temp = opint[0] & 0xff;
7036 temp |= (opint[0] >> 8) & 0xff00;
7037 temp |= (opint[0] >> 16) & 0xff0000;
7038 temp |= (opint[0] >> 24) & 0xff000000;
7039
7040 return build_int_cst (long_integer_type_node, temp);
7041 }
7042
7043 static tree
7044 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7045 {
7046 unsigned HOST_WIDE_INT temp;
7047
7048 if (op_const == 0)
7049 return NULL;
7050
7051 temp = opint[0] & 0xff;
7052 temp |= (opint[0] & 0xff00) << 24;
7053
7054 return build_int_cst (long_integer_type_node, temp);
7055 }
7056
7057 static tree
7058 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7059 {
7060 unsigned HOST_WIDE_INT temp;
7061
7062 if (op_const == 0)
7063 return NULL;
7064
7065 temp = opint[0] & 0xff;
7066 temp |= (opint[0] & 0x0000ff00) << 8;
7067 temp |= (opint[0] & 0x00ff0000) << 16;
7068 temp |= (opint[0] & 0xff000000) << 24;
7069
7070 return build_int_cst (long_integer_type_node, temp);
7071 }
7072
7073 static tree
7074 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7075 {
7076 unsigned HOST_WIDE_INT temp;
7077
7078 if (op_const == 0)
7079 return NULL;
7080
7081 if (opint[0] == 0)
7082 temp = 64;
7083 else
7084 temp = exact_log2 (opint[0] & -opint[0]);
7085
7086 return build_int_cst (long_integer_type_node, temp);
7087 }
7088
7089 static tree
7090 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7091 {
7092 unsigned HOST_WIDE_INT temp;
7093
7094 if (op_const == 0)
7095 return NULL;
7096
7097 if (opint[0] == 0)
7098 temp = 64;
7099 else
7100 temp = 64 - floor_log2 (opint[0]) - 1;
7101
7102 return build_int_cst (long_integer_type_node, temp);
7103 }
7104
7105 static tree
7106 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7107 {
7108 unsigned HOST_WIDE_INT temp, op;
7109
7110 if (op_const == 0)
7111 return NULL;
7112
7113 op = opint[0];
7114 temp = 0;
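  /* op &= op - 1 clears the lowest set bit, so the loop below runs once per
     set bit (Kernighan's population-count trick); e.g. 0x28 -> 0x20 -> 0
     gives a count of 2.  */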
7115 while (op)
7116 temp++, op &= op - 1;
7117
7118 return build_int_cst (long_integer_type_node, temp);
7119 }
7120
7121 /* Fold one of our builtin functions. */
7122
7123 static tree
7124 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
7125 bool ignore ATTRIBUTE_UNUSED)
7126 {
7127 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7128 long op_const = 0;
7129 int i;
7130
7131 if (n_args > MAX_ARGS)
7132 return NULL;
7133
7134 for (i = 0; i < n_args; i++)
7135 {
7136 tree arg = op[i];
7137 if (arg == error_mark_node)
7138 return NULL;
7139
7140 opint[i] = 0;
7141 if (TREE_CODE (arg) == INTEGER_CST)
7142 {
7143 op_const |= 1L << i;
7144 opint[i] = int_cst_value (arg);
7145 }
7146 }
7147
7148 switch (DECL_FUNCTION_CODE (fndecl))
7149 {
7150 case ALPHA_BUILTIN_CMPBGE:
7151 return alpha_fold_builtin_cmpbge (opint, op_const);
7152
7153 case ALPHA_BUILTIN_EXTBL:
7154 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7155 case ALPHA_BUILTIN_EXTWL:
7156 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7157 case ALPHA_BUILTIN_EXTLL:
7158 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7159 case ALPHA_BUILTIN_EXTQL:
7160 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7161 case ALPHA_BUILTIN_EXTWH:
7162 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7163 case ALPHA_BUILTIN_EXTLH:
7164 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7165 case ALPHA_BUILTIN_EXTQH:
7166 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7167
7168 case ALPHA_BUILTIN_INSBL:
7169 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7170 case ALPHA_BUILTIN_INSWL:
7171 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7172 case ALPHA_BUILTIN_INSLL:
7173 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7174 case ALPHA_BUILTIN_INSQL:
7175 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7176 case ALPHA_BUILTIN_INSWH:
7177 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7178 case ALPHA_BUILTIN_INSLH:
7179 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7180 case ALPHA_BUILTIN_INSQH:
7181 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7182
7183 case ALPHA_BUILTIN_MSKBL:
7184 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7185 case ALPHA_BUILTIN_MSKWL:
7186 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7187 case ALPHA_BUILTIN_MSKLL:
7188 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7189 case ALPHA_BUILTIN_MSKQL:
7190 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7191 case ALPHA_BUILTIN_MSKWH:
7192 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7193 case ALPHA_BUILTIN_MSKLH:
7194 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7195 case ALPHA_BUILTIN_MSKQH:
7196 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7197
7198 case ALPHA_BUILTIN_UMULH:
7199 return alpha_fold_builtin_umulh (opint, op_const);
7200
7201 case ALPHA_BUILTIN_ZAP:
7202 opint[1] ^= 0xff;
7203 /* FALLTHRU */
7204 case ALPHA_BUILTIN_ZAPNOT:
7205 return alpha_fold_builtin_zapnot (op, opint, op_const);
7206
7207 case ALPHA_BUILTIN_MINUB8:
7208 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7209 case ALPHA_BUILTIN_MINSB8:
7210 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7211 case ALPHA_BUILTIN_MINUW4:
7212 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7213 case ALPHA_BUILTIN_MINSW4:
7214 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7215 case ALPHA_BUILTIN_MAXUB8:
7216 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7217 case ALPHA_BUILTIN_MAXSB8:
7218 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7219 case ALPHA_BUILTIN_MAXUW4:
7220 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7221 case ALPHA_BUILTIN_MAXSW4:
7222 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7223
7224 case ALPHA_BUILTIN_PERR:
7225 return alpha_fold_builtin_perr (opint, op_const);
7226 case ALPHA_BUILTIN_PKLB:
7227 return alpha_fold_builtin_pklb (opint, op_const);
7228 case ALPHA_BUILTIN_PKWB:
7229 return alpha_fold_builtin_pkwb (opint, op_const);
7230 case ALPHA_BUILTIN_UNPKBL:
7231 return alpha_fold_builtin_unpkbl (opint, op_const);
7232 case ALPHA_BUILTIN_UNPKBW:
7233 return alpha_fold_builtin_unpkbw (opint, op_const);
7234
7235 case ALPHA_BUILTIN_CTTZ:
7236 return alpha_fold_builtin_cttz (opint, op_const);
7237 case ALPHA_BUILTIN_CTLZ:
7238 return alpha_fold_builtin_ctlz (opint, op_const);
7239 case ALPHA_BUILTIN_CTPOP:
7240 return alpha_fold_builtin_ctpop (opint, op_const);
7241
7242 case ALPHA_BUILTIN_AMASK:
7243 case ALPHA_BUILTIN_IMPLVER:
7244 case ALPHA_BUILTIN_RPCC:
7245 case ALPHA_BUILTIN_THREAD_POINTER:
7246 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7247 /* None of these are foldable at compile-time. */
7248 default:
7249 return NULL;
7250 }
7251 }
7252 \f
7253 /* This page contains routines that are used to determine what the function
7254 prologue and epilogue code will do and write them out. */
7255
7256 /* Compute the size of the save area in the stack. */
7257
7258 /* These variables are used for communication between the following functions.
7259 They indicate various things about the current function being compiled
7260 that are used to tell what kind of prologue, epilogue and procedure
7261 descriptor to generate. */
7262
7263 /* Nonzero if we need a stack procedure. */
7264 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7265 static enum alpha_procedure_types alpha_procedure_type;
7266
7267 /* Register number (either FP or SP) that is used to unwind the frame. */
7268 static int vms_unwind_regno;
7269
7270 /* Register number used to save FP. We need not have one for RA since
7271 we don't modify it for register procedures. This is only defined
7272 for register frame procedures. */
7273 static int vms_save_fp_regno;
7274
7275 /* Register number used to reference objects off our PV. */
7276 static int vms_base_regno;
7277
7278 /* Compute register masks for saved registers. */
7279
7280 static void
7281 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7282 {
7283 unsigned long imask = 0;
7284 unsigned long fmask = 0;
7285 unsigned int i;
7286
7287 /* When outputting a thunk, we don't have valid register life info,
7288 but assemble_start_function wants to output .frame and .mask
7289 directives. */
7290 if (cfun->is_thunk)
7291 {
7292 *imaskP = 0;
7293 *fmaskP = 0;
7294 return;
7295 }
7296
7297 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7298 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7299
7300 /* One for every register we have to save. */
7301 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7302 if (! fixed_regs[i] && ! call_used_regs[i]
7303 && df_regs_ever_live_p (i) && i != REG_RA
7304 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7305 {
7306 if (i < 32)
7307 imask |= (1UL << i);
7308 else
7309 fmask |= (1UL << (i - 32));
7310 }
7311
7312 /* We need to restore these for the handler. */
7313 if (crtl->calls_eh_return)
7314 {
7315 for (i = 0; ; ++i)
7316 {
7317 unsigned regno = EH_RETURN_DATA_REGNO (i);
7318 if (regno == INVALID_REGNUM)
7319 break;
7320 imask |= 1UL << regno;
7321 }
7322 }
7323
7324 /* If any register spilled, then spill the return address also. */
7325 /* ??? This is required by the Digital stack unwind specification
7326 and isn't needed if we're doing Dwarf2 unwinding. */
7327 if (imask || fmask || alpha_ra_ever_killed ())
7328 imask |= (1UL << REG_RA);
7329
7330 *imaskP = imask;
7331 *fmaskP = fmask;
7332 }
7333
7334 int
7335 alpha_sa_size (void)
7336 {
7337 unsigned long mask[2];
7338 int sa_size = 0;
7339 int i, j;
7340
7341 alpha_sa_mask (&mask[0], &mask[1]);
7342
7343 if (TARGET_ABI_UNICOSMK)
7344 {
7345 if (mask[0] || mask[1])
7346 sa_size = 14;
7347 }
7348 else
7349 {
7350 for (j = 0; j < 2; ++j)
7351 for (i = 0; i < 32; ++i)
7352 if ((mask[j] >> i) & 1)
7353 sa_size++;
7354 }
7355
7356 if (TARGET_ABI_UNICOSMK)
7357 {
7358 /* We might not need to generate a frame if we don't make any calls
7359 (including calls to __T3E_MISMATCH if this is a vararg function),
7360 don't have any local variables which require stack slots, don't
7361 use alloca and have not determined that we need a frame for other
7362 reasons. */
7363
7364 alpha_procedure_type
7365 = (sa_size || get_frame_size() != 0
7366 || crtl->outgoing_args_size
7367 || cfun->stdarg || cfun->calls_alloca
7368 || frame_pointer_needed)
7369 ? PT_STACK : PT_REGISTER;
7370
7371 /* Always reserve space for saving callee-saved registers if we
7372 need a frame as required by the calling convention. */
7373 if (alpha_procedure_type == PT_STACK)
7374 sa_size = 14;
7375 }
7376 else if (TARGET_ABI_OPEN_VMS)
7377 {
7378 /* Start with a stack procedure if we make any calls (REG_RA used), or
7379 need a frame pointer, with a register procedure if we otherwise need
7380 at least a slot, and with a null procedure in other cases. */
7381 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7382 alpha_procedure_type = PT_STACK;
7383 else if (get_frame_size() != 0)
7384 alpha_procedure_type = PT_REGISTER;
7385 else
7386 alpha_procedure_type = PT_NULL;
7387
7388 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7389 made the final decision on stack procedure vs register procedure. */
7390 if (alpha_procedure_type == PT_STACK)
7391 sa_size -= 2;
7392
7393 /* Decide whether to refer to objects off our PV via FP or PV.
7394 If we need FP for something else or if we receive a nonlocal
7395 goto (which expects PV to contain the value), we must use PV.
7396 Otherwise, start by assuming we can use FP. */
7397
7398 vms_base_regno
7399 = (frame_pointer_needed
7400 || cfun->has_nonlocal_label
7401 || alpha_procedure_type == PT_STACK
7402 || crtl->outgoing_args_size)
7403 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7404
7405 /* If we want to copy PV into FP, we need to find some register
7406 in which to save FP. */
7407
7408 vms_save_fp_regno = -1;
7409 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7410 for (i = 0; i < 32; i++)
7411 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7412 vms_save_fp_regno = i;
7413
7414 /* A VMS condition handler requires a stack procedure in our
7415 implementation (not required by the calling standard). */
7416 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7417 || cfun->machine->uses_condition_handler)
7418 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7419 else if (alpha_procedure_type == PT_NULL)
7420 vms_base_regno = REG_PV;
7421
7422 /* Stack unwinding should be done via FP unless we use it for PV. */
7423 vms_unwind_regno = (vms_base_regno == REG_PV
7424 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7425
7426 /* If this is a stack procedure, allow space for saving FP, RA and
7427 a condition handler slot if needed. */
7428 if (alpha_procedure_type == PT_STACK)
7429 sa_size += 2 + cfun->machine->uses_condition_handler;
7430 }
7431 else
7432 {
7433 /* Our size must be even (multiple of 16 bytes). */
7434 if (sa_size & 1)
7435 sa_size++;
7436 }
7437
7438 return sa_size * 8;
7439 }
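/* For instance (OSF, illustrative): a function that ends up saving $9, $10
   and the return address $26 has three bits set across the masks; the count
   is padded to an even 4 to keep the area a multiple of 16 bytes, so
   alpha_sa_size returns 32.  */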
7440
7441 /* Define the offset between two registers, one to be eliminated,
7442 and the other its replacement, at the start of a routine. */
7443
7444 HOST_WIDE_INT
7445 alpha_initial_elimination_offset (unsigned int from,
7446 unsigned int to ATTRIBUTE_UNUSED)
7447 {
7448 HOST_WIDE_INT ret;
7449
7450 ret = alpha_sa_size ();
7451 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7452
7453 switch (from)
7454 {
7455 case FRAME_POINTER_REGNUM:
7456 break;
7457
7458 case ARG_POINTER_REGNUM:
7459 ret += (ALPHA_ROUND (get_frame_size ()
7460 + crtl->args.pretend_args_size)
7461 - crtl->args.pretend_args_size);
7462 break;
7463
7464 default:
7465 gcc_unreachable ();
7466 }
7467
7468 return ret;
7469 }
7470
7471 #if TARGET_ABI_OPEN_VMS
7472
7473 /* Worker function for TARGET_CAN_ELIMINATE. */
7474
7475 static bool
7476 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7477 {
7478 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7479 alpha_sa_size ();
7480
7481 switch (alpha_procedure_type)
7482 {
7483 case PT_NULL:
7484 /* NULL procedures have no frame of their own and we only
7485 know how to resolve from the current stack pointer. */
7486 return to == STACK_POINTER_REGNUM;
7487
7488 case PT_REGISTER:
7489 case PT_STACK:
7490 /* We always allow elimination, except to the stack pointer when a
7491 usable frame pointer is at hand (we then must unwind through it). */
7492 return (to != STACK_POINTER_REGNUM
7493 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7494 }
7495
7496 gcc_unreachable ();
7497 }
7498
7499 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7500 designates the same location as FROM. */
7501
7502 HOST_WIDE_INT
7503 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7504 {
7505 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7506 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7507 on the proper computations and will need the register save area size
7508 in most cases. */
7509
7510 HOST_WIDE_INT sa_size = alpha_sa_size ();
7511
7512 /* PT_NULL procedures have no frame of their own and we only allow
7513 elimination to the stack pointer. This is the argument pointer and we
7514 resolve the soft frame pointer to that as well. */
7515
7516 if (alpha_procedure_type == PT_NULL)
7517 return 0;
7518
7519 /* For a PT_STACK procedure the frame layout looks as follows
7520
7521 -----> decreasing addresses
7522
7523               <  size rounded up to 21   |   likewise   >
7524 --------------#-------------------------+++------------+++--------------#
7525 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7526 --------------#---------------------------------------------------------#
7527                                ^         ^              ^               ^
7528                             ARG_PTR  FRAME_PTR   HARD_FRAME_PTR     STACK_PTR
7529
7530
7531 PT_REGISTER procedures are similar in that they may have a frame of their
7532 own. They have no regs-sa/pv/outgoing-args area.
7533
7534 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7535 to STACK_PTR if need be. */
7536
7537 {
7538 HOST_WIDE_INT offset;
7539 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7540
7541 switch (from)
7542 {
7543 case FRAME_POINTER_REGNUM:
7544 offset = ALPHA_ROUND (sa_size + pv_save_size);
7545 break;
7546 case ARG_POINTER_REGNUM:
7547 offset = (ALPHA_ROUND (sa_size + pv_save_size
7548 + get_frame_size ()
7549 + crtl->args.pretend_args_size)
7550 - crtl->args.pretend_args_size);
7551 break;
7552 default:
7553 gcc_unreachable ();
7554 }
7555
7556 if (to == STACK_POINTER_REGNUM)
7557 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7558
7559 return offset;
7560 }
7561 }
7562
7563 #define COMMON_OBJECT "common_object"
7564
7565 static tree
7566 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7567 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7568 bool *no_add_attrs ATTRIBUTE_UNUSED)
7569 {
7570 tree decl = *node;
7571 gcc_assert (DECL_P (decl));
7572
7573 DECL_COMMON (decl) = 1;
7574 return NULL_TREE;
7575 }
7576
7577 static const struct attribute_spec vms_attribute_table[] =
7578 {
7579 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7580 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler },
7581 { NULL, 0, 0, false, false, false, NULL }
7582 };
7583
7584 void
7585 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7586 unsigned HOST_WIDE_INT size,
7587 unsigned int align)
7588 {
7589 tree attr = DECL_ATTRIBUTES (decl);
7590 fprintf (file, "%s", COMMON_ASM_OP);
7591 assemble_name (file, name);
7592 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7593 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7594 fprintf (file, ",%u", align / BITS_PER_UNIT);
7595 if (attr)
7596 {
7597 attr = lookup_attribute (COMMON_OBJECT, attr);
7598 if (attr)
7599 fprintf (file, ",%s",
7600 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7601 }
7602 fputc ('\n', file);
7603 }
7604
7605 #undef COMMON_OBJECT
7606
7607 #endif
7608
7609 static int
7610 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7611 {
7612 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7613 }
7614
7615 int
7616 alpha_find_lo_sum_using_gp (rtx insn)
7617 {
7618 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7619 }
7620
7621 static int
7622 alpha_does_function_need_gp (void)
7623 {
7624 rtx insn;
7625
7626 /* The GP being variable is an OSF abi thing. */
7627 if (! TARGET_ABI_OSF)
7628 return 0;
7629
7630 /* We need the gp to load the address of __mcount. */
7631 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7632 return 1;
7633
7634 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7635 if (cfun->is_thunk)
7636 return 1;
7637
7638 /* The nonlocal receiver pattern assumes that the gp is valid for
7639 the nested function. Reasonable because it's almost always set
7640 correctly already. For the cases where that's wrong, make sure
7641 the nested function loads its gp on entry. */
7642 if (crtl->has_nonlocal_goto)
7643 return 1;
7644
7645 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7646 Even if we are a static function, we still need to do this in case
7647 our address is taken and passed to something like qsort. */
7648
7649 push_topmost_sequence ();
7650 insn = get_insns ();
7651 pop_topmost_sequence ();
7652
7653 for (; insn; insn = NEXT_INSN (insn))
7654 if (NONDEBUG_INSN_P (insn)
7655 && ! JUMP_TABLE_DATA_P (insn)
7656 && GET_CODE (PATTERN (insn)) != USE
7657 && GET_CODE (PATTERN (insn)) != CLOBBER
7658 && get_attr_usegp (insn))
7659 return 1;
7660
7661 return 0;
7662 }
7663
7664 \f
7665 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7666 sequences. */
7667
7668 static rtx
7669 set_frame_related_p (void)
7670 {
7671 rtx seq = get_insns ();
7672 rtx insn;
7673
7674 end_sequence ();
7675
7676 if (!seq)
7677 return NULL_RTX;
7678
7679 if (INSN_P (seq))
7680 {
7681 insn = seq;
7682 while (insn != NULL_RTX)
7683 {
7684 RTX_FRAME_RELATED_P (insn) = 1;
7685 insn = NEXT_INSN (insn);
7686 }
7687 seq = emit_insn (seq);
7688 }
7689 else
7690 {
7691 seq = emit_insn (seq);
7692 RTX_FRAME_RELATED_P (seq) = 1;
7693 }
7694 return seq;
7695 }
7696
7697 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7698
7699 /* Generates a store with the proper unwind info attached. VALUE is
7700 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7701 contains SP+FRAME_BIAS, and that is the unwind info that should be
7702 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7703 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7704
7705 static void
7706 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7707 HOST_WIDE_INT base_ofs, rtx frame_reg)
7708 {
7709 rtx addr, mem, insn;
7710
7711 addr = plus_constant (base_reg, base_ofs);
7712 mem = gen_rtx_MEM (DImode, addr);
7713 set_mem_alias_set (mem, alpha_sr_alias_set);
7714
7715 insn = emit_move_insn (mem, value);
7716 RTX_FRAME_RELATED_P (insn) = 1;
7717
7718 if (frame_bias || value != frame_reg)
7719 {
7720 if (frame_bias)
7721 {
7722 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7723 mem = gen_rtx_MEM (DImode, addr);
7724 }
7725
7726 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7727 gen_rtx_SET (VOIDmode, mem, frame_reg));
7728 }
7729 }
7730
7731 static void
7732 emit_frame_store (unsigned int regno, rtx base_reg,
7733 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7734 {
7735 rtx reg = gen_rtx_REG (DImode, regno);
7736 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7737 }
7738
7739 /* Compute the frame size. SIZE is the size of the "naked" frame
7740 and SA_SIZE is the size of the register save area. */
7741
7742 static HOST_WIDE_INT
7743 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7744 {
7745 if (TARGET_ABI_OPEN_VMS)
7746 return ALPHA_ROUND (sa_size
7747 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7748 + size
7749 + crtl->args.pretend_args_size);
7750 else if (TARGET_ABI_UNICOSMK)
7751 /* We have to allocate space for the DSIB if we generate a frame. */
7752 return ALPHA_ROUND (sa_size
7753 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7754 + ALPHA_ROUND (size
7755 + crtl->outgoing_args_size);
7756 else
7757 return ALPHA_ROUND (crtl->outgoing_args_size)
7758 + sa_size
7759 + ALPHA_ROUND (size
7760 + crtl->args.pretend_args_size);
7761 }
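/* As a rough illustration (hypothetical numbers, assuming ALPHA_ROUND
   rounds up to the 16-byte stack alignment): on OSF, with 24 bytes of
   outgoing args, a 16-byte register save area and 40 bytes of locals,
   the frame becomes ALPHA_ROUND (24) + 16 + ALPHA_ROUND (40)
   = 32 + 16 + 48 = 96 bytes. */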
7762
7763 /* Write function prologue. */
7764
7765 /* On vms we have two kinds of functions:
7766
7767 - stack frame (PROC_STACK)
7768 these are 'normal' functions with local variables, which call
7769 other functions
7770 - register frame (PROC_REGISTER)
7771 keeps all data in registers, needs no stack
7772
7773 We must pass this to the assembler so it can generate the
7774 proper pdsc (procedure descriptor).
7775 This is done with the '.pdesc' directive.
7776
7777 On non-VMS targets, we don't really differentiate between the two, as we
7778 can simply allocate stack without saving registers. */
7779
7780 void
7781 alpha_expand_prologue (void)
7782 {
7783 /* Registers to save. */
7784 unsigned long imask = 0;
7785 unsigned long fmask = 0;
7786 /* Stack space needed for pushing registers clobbered by us. */
7787 HOST_WIDE_INT sa_size;
7788 /* Complete stack size needed. */
7789 HOST_WIDE_INT frame_size;
7790 /* Probed stack size; it additionally includes the size of
7791 the "reserve region" if any. */
7792 HOST_WIDE_INT probed_size;
7793 /* Offset from base reg to register save area. */
7794 HOST_WIDE_INT reg_offset;
7795 rtx sa_reg;
7796 int i;
7797
7798 sa_size = alpha_sa_size ();
7799 frame_size = compute_frame_size (get_frame_size (), sa_size);
7800
7801 if (flag_stack_usage)
7802 current_function_static_stack_size = frame_size;
7803
7804 if (TARGET_ABI_OPEN_VMS)
7805 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7806 else
7807 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7808
7809 alpha_sa_mask (&imask, &fmask);
7810
7811 /* Emit an insn to reload GP, if needed. */
7812 if (TARGET_ABI_OSF)
7813 {
7814 alpha_function_needs_gp = alpha_does_function_need_gp ();
7815 if (alpha_function_needs_gp)
7816 emit_insn (gen_prologue_ldgp ());
7817 }
7818
7819 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7820 the call to mcount ourselves, rather than having the linker do it
7821 magically in response to -pg. Since _mcount has special linkage,
7822 don't represent the call as a call. */
7823 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7824 emit_insn (gen_prologue_mcount ());
7825
7826 if (TARGET_ABI_UNICOSMK)
7827 unicosmk_gen_dsib (&imask);
7828
7829 /* Adjust the stack by the frame size. If the frame size is > 4096
7830 bytes, we need to be sure we probe somewhere in the first and last
7831 4096 bytes (we can probably get away without the latter test) and
7832 every 8192 bytes in between. If the frame size is > 32768, we
7833 do this in a loop. Otherwise, we generate the explicit probe
7834 instructions.
7835
7836 Note that we are only allowed to adjust sp once in the prologue. */
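/* For instance (hypothetical numbers; non-UNICOSMK, no saved registers,
   no -fstack-check): with probed_size = 20000 the loop below emits probes
   at sp-4096 and sp-12288; since 20000 > 20480 - 4096, a final probe at
   sp-20000 is added, and sp is then adjusted by -frame_size in a single
   insn. */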
7837
7838 probed_size = frame_size;
7839 if (flag_stack_check)
7840 probed_size += STACK_CHECK_PROTECT;
7841
7842 if (probed_size <= 32768)
7843 {
7844 if (probed_size > 4096)
7845 {
7846 int probed;
7847
7848 for (probed = 4096; probed < probed_size; probed += 8192)
7849 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7850 ? -probed + 64
7851 : -probed)));
7852
7853 /* We only have to do this probe if we aren't saving registers or
7854 if we are probing beyond the frame because of -fstack-check. */
7855 if ((sa_size == 0 && probed_size > probed - 4096)
7856 || flag_stack_check)
7857 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7858 }
7859
7860 if (frame_size != 0)
7861 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7862 GEN_INT (TARGET_ABI_UNICOSMK
7863 ? -frame_size + 64
7864 : -frame_size))));
7865 }
7866 else
7867 {
7868 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7869 number of 8192 byte blocks to probe. We then probe each block
7870 in the loop and then set SP to the proper location. If the
7871 amount remaining is > 4096, we have to do one more probe if we
7872 are not saving any registers or if we are probing beyond the
7873 frame because of -fstack-check. */
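/* Hypothetical example: probed_size = 100000 gives
   blocks = (100000 + 4096) / 8192 = 12 and
   leftover = 104096 - 12*8192 = 5792.  R23 holds 12, R22 starts at
   SP + 4096, the loop probes each 8192-byte block, and since
   leftover > 4096 (and assuming no registers are saved) one extra probe
   is emitted before SP is finally set to R22 - 5792. */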
7874
7875 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7876 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7877 rtx ptr = gen_rtx_REG (DImode, 22);
7878 rtx count = gen_rtx_REG (DImode, 23);
7879 rtx seq;
7880
7881 emit_move_insn (count, GEN_INT (blocks));
7882 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7883 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7884
7885 /* Because of the difficulty in emitting a new basic block this
7886 late in the compilation, generate the loop as a single insn. */
7887 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7888
7889 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7890 {
7891 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7892 MEM_VOLATILE_P (last) = 1;
7893 emit_move_insn (last, const0_rtx);
7894 }
7895
7896 if (TARGET_ABI_WINDOWS_NT || flag_stack_check)
7897 {
7898 /* For NT stack unwind (done by 'reverse execution'), it's
7899 not OK to take the result of a loop, even though the value
7900 is already in ptr, so we reload it via a single operation
7901 and subtract it from sp.
7902
7903 Same if -fstack-check is specified, because the probed stack
7904 size is not equal to the frame size.
7905
7906 Yes, that's correct -- we have to reload the whole constant
7907 into a temporary via ldah+lda then subtract from sp. */
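/* Illustration of the split below (hypothetical value): for
   frame_size = 100000 (0x186a0), lo = ((0x86a0 ^ 0x8000) - 0x8000)
   = -31072 and hi = 131072, so the constant is rebuilt as ldah with 2
   (yielding 131072) followed by lda with -31072; hi is an exact multiple
   of 65536 and lo is the sign-extended low 16 bits. */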
7908
7909 HOST_WIDE_INT lo, hi;
7910 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7911 hi = frame_size - lo;
7912
7913 emit_move_insn (ptr, GEN_INT (hi));
7914 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7915 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7916 ptr));
7917 }
7918 else
7919 {
7920 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7921 GEN_INT (-leftover)));
7922 }
7923
7924 /* This alternative is special, because the DWARF code cannot
7925 possibly intuit through the loop above. So we invent this
7926 note for it to look at instead.
7927 RTX_FRAME_RELATED_P (seq) = 1;
7928 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7929 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7930 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7931 GEN_INT (TARGET_ABI_UNICOSMK
7932 ? -frame_size + 64
7933 : -frame_size))));
7934 }
7935
7936 if (!TARGET_ABI_UNICOSMK)
7937 {
7938 HOST_WIDE_INT sa_bias = 0;
7939
7940 /* Cope with very large offsets to the register save area. */
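/* E.g. (hypothetical values): reg_offset = 0x9000 with sa_size = 0x100
   yields low = -0x7000, so sa_bias becomes 0x10000 and reg_offset -0x7000;
   the first save then lands at $24 + (-0x7000) = sp + 0x9000, the same
   address as before, but every offset now fits in the signed 16-bit
   displacement of a load/store. */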
7941 sa_reg = stack_pointer_rtx;
7942 if (reg_offset + sa_size > 0x8000)
7943 {
7944 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7945 rtx sa_bias_rtx;
7946
7947 if (low + sa_size <= 0x8000)
7948 sa_bias = reg_offset - low, reg_offset = low;
7949 else
7950 sa_bias = reg_offset, reg_offset = 0;
7951
7952 sa_reg = gen_rtx_REG (DImode, 24);
7953 sa_bias_rtx = GEN_INT (sa_bias);
7954
7955 if (add_operand (sa_bias_rtx, DImode))
7956 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7957 else
7958 {
7959 emit_move_insn (sa_reg, sa_bias_rtx);
7960 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7961 }
7962 }
7963
7964 /* Save regs in stack order. Beginning with VMS PV. */
7965 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7966 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7967
7968 /* Save register RA next. */
7969 if (imask & (1UL << REG_RA))
7970 {
7971 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7972 imask &= ~(1UL << REG_RA);
7973 reg_offset += 8;
7974 }
7975
7976 /* Now save any other registers required to be saved. */
7977 for (i = 0; i < 31; i++)
7978 if (imask & (1UL << i))
7979 {
7980 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7981 reg_offset += 8;
7982 }
7983
7984 for (i = 0; i < 31; i++)
7985 if (fmask & (1UL << i))
7986 {
7987 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7988 reg_offset += 8;
7989 }
7990 }
7991 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7992 {
7993 /* The standard frame on the T3E includes space for saving registers.
7994 We just have to use it. We don't have to save the return address and
7995 the old frame pointer here - they are saved in the DSIB. */
7996
7997 reg_offset = -56;
7998 for (i = 9; i < 15; i++)
7999 if (imask & (1UL << i))
8000 {
8001 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
8002 reg_offset -= 8;
8003 }
8004 for (i = 2; i < 10; i++)
8005 if (fmask & (1UL << i))
8006 {
8007 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
8008 reg_offset -= 8;
8009 }
8010 }
8011
8012 if (TARGET_ABI_OPEN_VMS)
8013 {
8014 /* Register frame procedures save the fp. */
8015 if (alpha_procedure_type == PT_REGISTER)
8016 {
8017 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
8018 hard_frame_pointer_rtx);
8019 add_reg_note (insn, REG_CFA_REGISTER, NULL);
8020 RTX_FRAME_RELATED_P (insn) = 1;
8021 }
8022
8023 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
8024 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
8025 gen_rtx_REG (DImode, REG_PV)));
8026
8027 if (alpha_procedure_type != PT_NULL
8028 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8029 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8030
8031 /* If we have to allocate space for outgoing args, do it now. */
8032 if (crtl->outgoing_args_size != 0)
8033 {
8034 rtx seq
8035 = emit_move_insn (stack_pointer_rtx,
8036 plus_constant
8037 (hard_frame_pointer_rtx,
8038 - (ALPHA_ROUND
8039 (crtl->outgoing_args_size))));
8040
8041 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
8042 if ! frame_pointer_needed. Setting the bit will change the CFA
8043 computation rule to use sp again, which would be wrong if we had
8044 frame_pointer_needed, as this means sp might move unpredictably
8045 later on.
8046
8047 Also, note that
8048 frame_pointer_needed
8049 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8050 and
8051 crtl->outgoing_args_size != 0
8052 => alpha_procedure_type != PT_NULL,
8053
8054 so when we are not setting the bit here, we are guaranteed to
8055 have emitted an FRP frame pointer update just before. */
8056 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
8057 }
8058 }
8059 else if (!TARGET_ABI_UNICOSMK)
8060 {
8061 /* If we need a frame pointer, set it from the stack pointer. */
8062 if (frame_pointer_needed)
8063 {
8064 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8065 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8066 else
8067 /* This must always be the last instruction in the
8068 prologue, thus we emit a special move + clobber. */
8069 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8070 stack_pointer_rtx, sa_reg)));
8071 }
8072 }
8073
8074 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8075 the prologue, for exception handling reasons, we cannot do this for
8076 any insn that might fault. We could prevent this for mems with a
8077 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8078 have to prevent all such scheduling with a blockage.
8079
8080 Linux, on the other hand, never bothered to implement OSF/1's
8081 exception handling, and so doesn't care about such things. Anyone
8082 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8083
8084 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8085 emit_insn (gen_blockage ());
8086 }
8087
8088 /* Count the number of .file directives, so that .loc is up to date. */
8089 int num_source_filenames = 0;
8090
8091 /* Output the textual info surrounding the prologue. */
8092
8093 void
8094 alpha_start_function (FILE *file, const char *fnname,
8095 tree decl ATTRIBUTE_UNUSED)
8096 {
8097 unsigned long imask = 0;
8098 unsigned long fmask = 0;
8099 /* Stack space needed for pushing registers clobbered by us. */
8100 HOST_WIDE_INT sa_size;
8101 /* Complete stack size needed. */
8102 unsigned HOST_WIDE_INT frame_size;
8103 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
8104 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8105 ? 524288
8106 : 1UL << 31;
8107 /* Offset from base reg to register save area. */
8108 HOST_WIDE_INT reg_offset;
8109 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8110 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8111 int i;
8112
8113 /* Don't emit an extern directive for functions defined in the same file. */
8114 if (TARGET_ABI_UNICOSMK)
8115 {
8116 tree name_tree;
8117 name_tree = get_identifier (fnname);
8118 TREE_ASM_WRITTEN (name_tree) = 1;
8119 }
8120
8121 #if TARGET_ABI_OPEN_VMS
8122 if (vms_debug_main
8123 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8124 {
8125 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8126 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8127 switch_to_section (text_section);
8128 vms_debug_main = NULL;
8129 }
8130 #endif
8131
8132 alpha_fnname = fnname;
8133 sa_size = alpha_sa_size ();
8134 frame_size = compute_frame_size (get_frame_size (), sa_size);
8135
8136 if (TARGET_ABI_OPEN_VMS)
8137 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8138 else
8139 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8140
8141 alpha_sa_mask (&imask, &fmask);
8142
8143 /* Ecoff can handle multiple .file directives, so put out file and lineno.
8144 We have to do that before the .ent directive as we cannot switch
8145 files within procedures with native ecoff because line numbers are
8146 linked to procedure descriptors.
8147 Outputting the lineno helps debugging of one line functions as they
8148 would otherwise get no line number at all. Please note that we would
8149 like to put out last_linenum from final.c, but it is not accessible. */
8150
8151 if (write_symbols == SDB_DEBUG)
8152 {
8153 #ifdef ASM_OUTPUT_SOURCE_FILENAME
8154 ASM_OUTPUT_SOURCE_FILENAME (file,
8155 DECL_SOURCE_FILE (current_function_decl));
8156 #endif
8157 #ifdef SDB_OUTPUT_SOURCE_LINE
8158 if (debug_info_level != DINFO_LEVEL_TERSE)
8159 SDB_OUTPUT_SOURCE_LINE (file,
8160 DECL_SOURCE_LINE (current_function_decl));
8161 #endif
8162 }
8163
8164 /* Issue function start and label. */
8165 if (TARGET_ABI_OPEN_VMS
8166 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
8167 {
8168 fputs ("\t.ent ", file);
8169 assemble_name (file, fnname);
8170 putc ('\n', file);
8171
8172 /* If the function needs GP, we'll write the "..ng" label there.
8173 Otherwise, do it here. */
8174 if (TARGET_ABI_OSF
8175 && ! alpha_function_needs_gp
8176 && ! cfun->is_thunk)
8177 {
8178 putc ('$', file);
8179 assemble_name (file, fnname);
8180 fputs ("..ng:\n", file);
8181 }
8182 }
8183 /* Nested functions on VMS that are potentially called via trampoline
8184 get a special transfer entry point that loads the called functions
8185 procedure descriptor and static chain. */
8186 if (TARGET_ABI_OPEN_VMS
8187 && !TREE_PUBLIC (decl)
8188 && DECL_CONTEXT (decl)
8189 && !TYPE_P (DECL_CONTEXT (decl)))
8190 {
8191 strcpy (tramp_label, fnname);
8192 strcat (tramp_label, "..tr");
8193 ASM_OUTPUT_LABEL (file, tramp_label);
8194 fprintf (file, "\tldq $1,24($27)\n");
8195 fprintf (file, "\tldq $27,16($27)\n");
8196 }
8197
8198 strcpy (entry_label, fnname);
8199 if (TARGET_ABI_OPEN_VMS)
8200 strcat (entry_label, "..en");
8201
8202 /* For public functions, the label must be globalized by appending an
8203 additional colon. */
8204 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
8205 strcat (entry_label, ":");
8206
8207 ASM_OUTPUT_LABEL (file, entry_label);
8208 inside_function = TRUE;
8209
8210 if (TARGET_ABI_OPEN_VMS)
8211 fprintf (file, "\t.base $%d\n", vms_base_regno);
8212
8213 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
8214 && !flag_inhibit_size_directive)
8215 {
8216 /* Set flags in procedure descriptor to request IEEE-conformant
8217 math-library routines. The value we set it to is PDSC_EXC_IEEE
8218 (/usr/include/pdsc.h). */
8219 fputs ("\t.eflag 48\n", file);
8220 }
8221
8222 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8223 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8224 alpha_arg_offset = -frame_size + 48;
8225
8226 /* Describe our frame. If the frame size is larger than an integer,
8227 print it as zero to avoid an assembler error. We won't be
8228 properly describing such a frame, but that's the best we can do. */
8229 if (TARGET_ABI_UNICOSMK)
8230 ;
8231 else if (TARGET_ABI_OPEN_VMS)
8232 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8233 HOST_WIDE_INT_PRINT_DEC "\n",
8234 vms_unwind_regno,
8235 frame_size >= (1UL << 31) ? 0 : frame_size,
8236 reg_offset);
8237 else if (!flag_inhibit_size_directive)
8238 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8239 (frame_pointer_needed
8240 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8241 frame_size >= max_frame_size ? 0 : frame_size,
8242 crtl->args.pretend_args_size);
8243
8244 /* Describe which registers were spilled. */
8245 if (TARGET_ABI_UNICOSMK)
8246 ;
8247 else if (TARGET_ABI_OPEN_VMS)
8248 {
8249 if (imask)
8250 /* ??? Does VMS care if mask contains ra? The old code didn't
8251 set it, so I don't here. */
8252 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8253 if (fmask)
8254 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8255 if (alpha_procedure_type == PT_REGISTER)
8256 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8257 }
8258 else if (!flag_inhibit_size_directive)
8259 {
8260 if (imask)
8261 {
8262 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8263 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8264
8265 for (i = 0; i < 32; ++i)
8266 if (imask & (1UL << i))
8267 reg_offset += 8;
8268 }
8269
8270 if (fmask)
8271 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8272 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8273 }
8274
8275 #if TARGET_ABI_OPEN_VMS
8276 /* If a user condition handler has been installed at some point, emit
8277 the procedure descriptor bits to point the Condition Handling Facility
8278 at the indirection wrapper, and state the fp offset at which the user
8279 handler may be found. */
8280 if (cfun->machine->uses_condition_handler)
8281 {
8282 fprintf (file, "\t.handler __gcc_shell_handler\n");
8283 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8284 }
8285
8286 /* Ifdef'ed because link_section is only available then. */
8287 switch_to_section (readonly_data_section);
8288 fprintf (file, "\t.align 3\n");
8289 assemble_name (file, fnname); fputs ("..na:\n", file);
8290 fputs ("\t.ascii \"", file);
8291 assemble_name (file, fnname);
8292 fputs ("\\0\"\n", file);
8293 alpha_need_linkage (fnname, 1);
8294 switch_to_section (text_section);
8295 #endif
8296 }
8297
8298 /* Emit the .prologue note at the scheduled end of the prologue. */
8299
8300 static void
8301 alpha_output_function_end_prologue (FILE *file)
8302 {
8303 if (TARGET_ABI_UNICOSMK)
8304 ;
8305 else if (TARGET_ABI_OPEN_VMS)
8306 fputs ("\t.prologue\n", file);
8307 else if (TARGET_ABI_WINDOWS_NT)
8308 fputs ("\t.prologue 0\n", file);
8309 else if (!flag_inhibit_size_directive)
8310 fprintf (file, "\t.prologue %d\n",
8311 alpha_function_needs_gp || cfun->is_thunk);
8312 }
8313
8314 /* Write function epilogue. */
8315
8316 void
8317 alpha_expand_epilogue (void)
8318 {
8319 /* Registers to save. */
8320 unsigned long imask = 0;
8321 unsigned long fmask = 0;
8322 /* Stack space needed for pushing registers clobbered by us. */
8323 HOST_WIDE_INT sa_size;
8324 /* Complete stack size needed. */
8325 HOST_WIDE_INT frame_size;
8326 /* Offset from base reg to register save area. */
8327 HOST_WIDE_INT reg_offset;
8328 int fp_is_frame_pointer, fp_offset;
8329 rtx sa_reg, sa_reg_exp = NULL;
8330 rtx sp_adj1, sp_adj2, mem, reg, insn;
8331 rtx eh_ofs;
8332 rtx cfa_restores = NULL_RTX;
8333 int i;
8334
8335 sa_size = alpha_sa_size ();
8336 frame_size = compute_frame_size (get_frame_size (), sa_size);
8337
8338 if (TARGET_ABI_OPEN_VMS)
8339 {
8340 if (alpha_procedure_type == PT_STACK)
8341 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8342 else
8343 reg_offset = 0;
8344 }
8345 else
8346 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8347
8348 alpha_sa_mask (&imask, &fmask);
8349
8350 fp_is_frame_pointer
8351 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8352 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8353 fp_offset = 0;
8354 sa_reg = stack_pointer_rtx;
8355
8356 if (crtl->calls_eh_return)
8357 eh_ofs = EH_RETURN_STACKADJ_RTX;
8358 else
8359 eh_ofs = NULL_RTX;
8360
8361 if (!TARGET_ABI_UNICOSMK && sa_size)
8362 {
8363 /* If we have a frame pointer, restore SP from it. */
8364 if ((TARGET_ABI_OPEN_VMS
8365 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8366 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8367 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8368
8369 /* Cope with very large offsets to the register save area. */
8370 if (reg_offset + sa_size > 0x8000)
8371 {
8372 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8373 HOST_WIDE_INT bias;
8374
8375 if (low + sa_size <= 0x8000)
8376 bias = reg_offset - low, reg_offset = low;
8377 else
8378 bias = reg_offset, reg_offset = 0;
8379
8380 sa_reg = gen_rtx_REG (DImode, 22);
8381 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8382
8383 emit_move_insn (sa_reg, sa_reg_exp);
8384 }
8385
8386 /* Restore registers in order, excepting a true frame pointer. */
8387
8388 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8389 if (! eh_ofs)
8390 set_mem_alias_set (mem, alpha_sr_alias_set);
8391 reg = gen_rtx_REG (DImode, REG_RA);
8392 emit_move_insn (reg, mem);
8393 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8394
8395 reg_offset += 8;
8396 imask &= ~(1UL << REG_RA);
8397
8398 for (i = 0; i < 31; ++i)
8399 if (imask & (1UL << i))
8400 {
8401 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8402 fp_offset = reg_offset;
8403 else
8404 {
8405 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8406 set_mem_alias_set (mem, alpha_sr_alias_set);
8407 reg = gen_rtx_REG (DImode, i);
8408 emit_move_insn (reg, mem);
8409 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8410 cfa_restores);
8411 }
8412 reg_offset += 8;
8413 }
8414
8415 for (i = 0; i < 31; ++i)
8416 if (fmask & (1UL << i))
8417 {
8418 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8419 set_mem_alias_set (mem, alpha_sr_alias_set);
8420 reg = gen_rtx_REG (DFmode, i+32);
8421 emit_move_insn (reg, mem);
8422 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8423 reg_offset += 8;
8424 }
8425 }
8426 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8427 {
8428 /* Restore callee-saved general-purpose registers. */
8429
8430 reg_offset = -56;
8431
8432 for (i = 9; i < 15; i++)
8433 if (imask & (1UL << i))
8434 {
8435 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8436 reg_offset));
8437 set_mem_alias_set (mem, alpha_sr_alias_set);
8438 reg = gen_rtx_REG (DImode, i);
8439 emit_move_insn (reg, mem);
8440 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8441 reg_offset -= 8;
8442 }
8443
8444 for (i = 2; i < 10; i++)
8445 if (fmask & (1UL << i))
8446 {
8447 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8448 reg_offset));
8449 set_mem_alias_set (mem, alpha_sr_alias_set);
8450 reg = gen_rtx_REG (DFmode, i+32);
8451 emit_move_insn (reg, mem);
8452 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8453 reg_offset -= 8;
8454 }
8455
8456 /* Restore the return address from the DSIB. */
8457 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8458 set_mem_alias_set (mem, alpha_sr_alias_set);
8459 reg = gen_rtx_REG (DImode, REG_RA);
8460 emit_move_insn (reg, mem);
8461 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8462 }
8463
8464 if (frame_size || eh_ofs)
8465 {
8466 sp_adj1 = stack_pointer_rtx;
8467
8468 if (eh_ofs)
8469 {
8470 sp_adj1 = gen_rtx_REG (DImode, 23);
8471 emit_move_insn (sp_adj1,
8472 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8473 }
8474
8475 /* If the stack size is large, begin computation into a temporary
8476 register so as not to interfere with a potential fp restore,
8477 which must be consecutive with an SP restore. */
8478 if (frame_size < 32768
8479 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8480 sp_adj2 = GEN_INT (frame_size);
8481 else if (TARGET_ABI_UNICOSMK)
8482 {
8483 sp_adj1 = gen_rtx_REG (DImode, 23);
8484 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8485 sp_adj2 = const0_rtx;
8486 }
8487 else if (frame_size < 0x40007fffL)
8488 {
8489 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8490
8491 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8492 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8493 sp_adj1 = sa_reg;
8494 else
8495 {
8496 sp_adj1 = gen_rtx_REG (DImode, 23);
8497 emit_move_insn (sp_adj1, sp_adj2);
8498 }
8499 sp_adj2 = GEN_INT (low);
8500 }
8501 else
8502 {
8503 rtx tmp = gen_rtx_REG (DImode, 23);
8504 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8505 if (!sp_adj2)
8506 {
8507 /* We can't drop new things to memory this late, afaik,
8508 so build it up by pieces. */
8509 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8510 -(frame_size < 0));
8511 gcc_assert (sp_adj2);
8512 }
8513 }
8514
8515 /* From now on, things must be in order. So emit blockages. */
8516
8517 /* Restore the frame pointer. */
8518 if (TARGET_ABI_UNICOSMK)
8519 {
8520 emit_insn (gen_blockage ());
8521 mem = gen_rtx_MEM (DImode,
8522 plus_constant (hard_frame_pointer_rtx, -16));
8523 set_mem_alias_set (mem, alpha_sr_alias_set);
8524 emit_move_insn (hard_frame_pointer_rtx, mem);
8525 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8526 hard_frame_pointer_rtx, cfa_restores);
8527 }
8528 else if (fp_is_frame_pointer)
8529 {
8530 emit_insn (gen_blockage ());
8531 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8532 set_mem_alias_set (mem, alpha_sr_alias_set);
8533 emit_move_insn (hard_frame_pointer_rtx, mem);
8534 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8535 hard_frame_pointer_rtx, cfa_restores);
8536 }
8537 else if (TARGET_ABI_OPEN_VMS)
8538 {
8539 emit_insn (gen_blockage ());
8540 emit_move_insn (hard_frame_pointer_rtx,
8541 gen_rtx_REG (DImode, vms_save_fp_regno));
8542 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8543 hard_frame_pointer_rtx, cfa_restores);
8544 }
8545
8546 /* Restore the stack pointer. */
8547 emit_insn (gen_blockage ());
8548 if (sp_adj2 == const0_rtx)
8549 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8550 else
8551 insn = emit_move_insn (stack_pointer_rtx,
8552 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8553 REG_NOTES (insn) = cfa_restores;
8554 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8555 RTX_FRAME_RELATED_P (insn) = 1;
8556 }
8557 else
8558 {
8559 gcc_assert (cfa_restores == NULL);
8560
8561 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8562 {
8563 emit_insn (gen_blockage ());
8564 insn = emit_move_insn (hard_frame_pointer_rtx,
8565 gen_rtx_REG (DImode, vms_save_fp_regno));
8566 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8567 RTX_FRAME_RELATED_P (insn) = 1;
8568 }
8569 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8570 {
8571 /* Decrement the frame pointer if the function does not have a
8572 frame. */
8573 emit_insn (gen_blockage ());
8574 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8575 hard_frame_pointer_rtx, constm1_rtx));
8576 }
8577 }
8578 }
8579 \f
8580 /* Output the rest of the textual info surrounding the epilogue. */
8581
8582 void
8583 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8584 {
8585 rtx insn;
8586
8587 /* We output a nop after noreturn calls at the very end of the function to
8588 ensure that the return address always remains in the caller's code range,
8589 as not doing so might confuse unwinding engines. */
8590 insn = get_last_insn ();
8591 if (!INSN_P (insn))
8592 insn = prev_active_insn (insn);
8593 if (insn && CALL_P (insn))
8594 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8595
8596 #if TARGET_ABI_OPEN_VMS
8597 alpha_write_linkage (file, fnname, decl);
8598 #endif
8599
8600 /* End the function. */
8601 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8602 {
8603 fputs ("\t.end ", file);
8604 assemble_name (file, fnname);
8605 putc ('\n', file);
8606 }
8607 inside_function = FALSE;
8608
8609 /* Output jump tables and the static subroutine information block. */
8610 if (TARGET_ABI_UNICOSMK)
8611 {
8612 unicosmk_output_ssib (file, fnname);
8613 unicosmk_output_deferred_case_vectors (file);
8614 }
8615 }
8616
8617 #if TARGET_ABI_OPEN_VMS
8618 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8619 {
8620 #ifdef DO_CRTL_NAMES
8621 DO_CRTL_NAMES;
8622 #endif
8623 }
8624 #endif
8625
8626 #if TARGET_ABI_OSF
8627 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8628
8629 In order to avoid the hordes of differences between generated code
8630 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8631 lots of code loading up large constants, generate rtl and emit it
8632 instead of going straight to text.
8633
8634 Not sure why this idea hasn't been explored before... */
8635
8636 static void
8637 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8638 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8639 tree function)
8640 {
8641 HOST_WIDE_INT hi, lo;
8642 rtx this_rtx, insn, funexp;
8643
8644 /* We always require a valid GP. */
8645 emit_insn (gen_prologue_ldgp ());
8646 emit_note (NOTE_INSN_PROLOGUE_END);
8647
8648 /* Find the "this" pointer. If the function returns a structure,
8649 the structure return pointer is in $16. */
8650 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8651 this_rtx = gen_rtx_REG (Pmode, 17);
8652 else
8653 this_rtx = gen_rtx_REG (Pmode, 16);
8654
8655 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8656 entire constant for the add. */
8657 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8658 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8659 if (hi + lo == delta)
8660 {
8661 if (hi)
8662 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8663 if (lo)
8664 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8665 }
8666 else
8667 {
8668 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8669 delta, -(delta < 0));
8670 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8671 }
8672
8673 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8674 if (vcall_offset)
8675 {
8676 rtx tmp, tmp2;
8677
8678 tmp = gen_rtx_REG (Pmode, 0);
8679 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8680
8681 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8682 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8683 if (hi + lo == vcall_offset)
8684 {
8685 if (hi)
8686 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8687 }
8688 else
8689 {
8690 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8691 vcall_offset, -(vcall_offset < 0));
8692 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8693 lo = 0;
8694 }
8695 if (lo)
8696 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8697 else
8698 tmp2 = tmp;
8699 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8700
8701 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8702 }
8703
8704 /* Generate a tail call to the target function. */
8705 if (! TREE_USED (function))
8706 {
8707 assemble_external (function);
8708 TREE_USED (function) = 1;
8709 }
8710 funexp = XEXP (DECL_RTL (function), 0);
8711 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8712 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8713 SIBLING_CALL_P (insn) = 1;
8714
8715 /* Run just enough of rest_of_compilation to get the insns emitted.
8716 There's not really enough bulk here to make other passes such as
8717 instruction scheduling worth while. Note that use_thunk calls
8718 assemble_start_function and assemble_end_function. */
8719 insn = get_insns ();
8720 insn_locators_alloc ();
8721 shorten_branches (insn);
8722 final_start_function (insn, file, 1);
8723 final (insn, file, 1);
8724 final_end_function ();
8725 }
8726 #endif /* TARGET_ABI_OSF */
8727 \f
8728 /* Debugging support. */
8729
8730 #include "gstab.h"
8731
8732 /* Count the number of sdb-related labels that are generated (to find
8733 block start and end boundaries). */
8734
8735 int sdb_label_count = 0;
8736
8737 /* Name of the file containing the current function. */
8738
8739 static const char *current_function_file = "";
8740
8741 /* Offsets to alpha virtual arg/local debugging pointers. */
8742
8743 long alpha_arg_offset;
8744 long alpha_auto_offset;
8745 \f
8746 /* Emit a new filename to a stream. */
8747
8748 void
8749 alpha_output_filename (FILE *stream, const char *name)
8750 {
8751 static int first_time = TRUE;
8752
8753 if (first_time)
8754 {
8755 first_time = FALSE;
8756 ++num_source_filenames;
8757 current_function_file = name;
8758 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8759 output_quoted_string (stream, name);
8760 fprintf (stream, "\n");
8761 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8762 fprintf (stream, "\t#@stabs\n");
8763 }
8764
8765 else if (write_symbols == DBX_DEBUG)
8766 /* dbxout.c will emit an appropriate .stabs directive. */
8767 return;
8768
8769 else if (name != current_function_file
8770 && strcmp (name, current_function_file) != 0)
8771 {
8772 if (inside_function && ! TARGET_GAS)
8773 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8774 else
8775 {
8776 ++num_source_filenames;
8777 current_function_file = name;
8778 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8779 }
8780
8781 output_quoted_string (stream, name);
8782 fprintf (stream, "\n");
8783 }
8784 }
8785 \f
8786 /* Structure to show the current status of registers and memory. */
8787
8788 struct shadow_summary
8789 {
8790 struct {
8791 unsigned int i : 31; /* Mask of int regs */
8792 unsigned int fp : 31; /* Mask of fp regs */
8793 unsigned int mem : 1; /* mem == imem | fpmem */
8794 } used, defd;
8795 };
8796
8797 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8798 to the summary structure. SET is nonzero if the insn is setting the
8799 object, otherwise zero. */
8800
8801 static void
8802 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8803 {
8804 const char *format_ptr;
8805 int i, j;
8806
8807 if (x == 0)
8808 return;
8809
8810 switch (GET_CODE (x))
8811 {
8812 /* ??? Note that this case would be incorrect if the Alpha had a
8813 ZERO_EXTRACT in SET_DEST. */
8814 case SET:
8815 summarize_insn (SET_SRC (x), sum, 0);
8816 summarize_insn (SET_DEST (x), sum, 1);
8817 break;
8818
8819 case CLOBBER:
8820 summarize_insn (XEXP (x, 0), sum, 1);
8821 break;
8822
8823 case USE:
8824 summarize_insn (XEXP (x, 0), sum, 0);
8825 break;
8826
8827 case ASM_OPERANDS:
8828 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8829 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8830 break;
8831
8832 case PARALLEL:
8833 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8834 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8835 break;
8836
8837 case SUBREG:
8838 summarize_insn (SUBREG_REG (x), sum, 0);
8839 break;
8840
8841 case REG:
8842 {
8843 int regno = REGNO (x);
8844 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8845
8846 if (regno == 31 || regno == 63)
8847 break;
8848
8849 if (set)
8850 {
8851 if (regno < 32)
8852 sum->defd.i |= mask;
8853 else
8854 sum->defd.fp |= mask;
8855 }
8856 else
8857 {
8858 if (regno < 32)
8859 sum->used.i |= mask;
8860 else
8861 sum->used.fp |= mask;
8862 }
8863 }
8864 break;
8865
8866 case MEM:
8867 if (set)
8868 sum->defd.mem = 1;
8869 else
8870 sum->used.mem = 1;
8871
8872 /* Find the regs used in memory address computation: */
8873 summarize_insn (XEXP (x, 0), sum, 0);
8874 break;
8875
8876 case CONST_INT: case CONST_DOUBLE:
8877 case SYMBOL_REF: case LABEL_REF: case CONST:
8878 case SCRATCH: case ASM_INPUT:
8879 break;
8880
8881 /* Handle common unary and binary ops for efficiency. */
8882 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8883 case MOD: case UDIV: case UMOD: case AND: case IOR:
8884 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8885 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8886 case NE: case EQ: case GE: case GT: case LE:
8887 case LT: case GEU: case GTU: case LEU: case LTU:
8888 summarize_insn (XEXP (x, 0), sum, 0);
8889 summarize_insn (XEXP (x, 1), sum, 0);
8890 break;
8891
8892 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8893 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8894 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8895 case SQRT: case FFS:
8896 summarize_insn (XEXP (x, 0), sum, 0);
8897 break;
8898
8899 default:
8900 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8901 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8902 switch (format_ptr[i])
8903 {
8904 case 'e':
8905 summarize_insn (XEXP (x, i), sum, 0);
8906 break;
8907
8908 case 'E':
8909 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8910 summarize_insn (XVECEXP (x, i, j), sum, 0);
8911 break;
8912
8913 case 'i':
8914 break;
8915
8916 default:
8917 gcc_unreachable ();
8918 }
8919 }
8920 }
8921
8922 /* Ensure a sufficient number of `trapb' insns are in the code when
8923 the user requests code with a trap precision of functions or
8924 instructions.
8925
8926 In naive mode, when the user requests a trap-precision of
8927 "instruction", a trapb is needed after every instruction that may
8928 generate a trap. This ensures that the code is resumption safe but
8929 it is also slow.
8930
8931 When optimizations are turned on, we delay issuing a trapb as long
8932 as possible. In this context, a trap shadow is the sequence of
8933 instructions that starts with a (potentially) trap generating
8934 instruction and extends to the next trapb or call_pal instruction
8935 (but GCC never generates call_pal by itself). We can delay (and
8936 therefore sometimes omit) a trapb subject to the following
8937 conditions:
8938
8939 (a) On entry to the trap shadow, if any Alpha register or memory
8940 location contains a value that is used as an operand value by some
8941 instruction in the trap shadow (live on entry), then no instruction
8942 in the trap shadow may modify the register or memory location.
8943
8944 (b) Within the trap shadow, the computation of the base register
8945 for a memory load or store instruction may not involve using the
8946 result of an instruction that might generate an UNPREDICTABLE
8947 result.
8948
8949 (c) Within the trap shadow, no register may be used more than once
8950 as a destination register. (This is to make life easier for the
8951 trap-handler.)
8952
8953 (d) The trap shadow may not include any branch instructions. */
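/* A minimal sketch of a legal shadow (hypothetical instructions, not
   generated by this file):

       addt $f1,$f2,$f3     # potentially trapping insn opens the shadow
       ldq  $1,0($2)        # ok: distinct destination, base not derived
                            # from the trapping result
       trapb                # barrier closes the shadow

   Reusing $f3 or $1 as a destination inside the shadow, or branching out
   of it, would force an earlier trapb under rules (a)-(d) above. */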
8954
8955 static void
8956 alpha_handle_trap_shadows (void)
8957 {
8958 struct shadow_summary shadow;
8959 int trap_pending, exception_nesting;
8960 rtx i, n;
8961
8962 trap_pending = 0;
8963 exception_nesting = 0;
8964 shadow.used.i = 0;
8965 shadow.used.fp = 0;
8966 shadow.used.mem = 0;
8967 shadow.defd = shadow.used;
8968
8969 for (i = get_insns (); i ; i = NEXT_INSN (i))
8970 {
8971 if (NOTE_P (i))
8972 {
8973 switch (NOTE_KIND (i))
8974 {
8975 case NOTE_INSN_EH_REGION_BEG:
8976 exception_nesting++;
8977 if (trap_pending)
8978 goto close_shadow;
8979 break;
8980
8981 case NOTE_INSN_EH_REGION_END:
8982 exception_nesting--;
8983 if (trap_pending)
8984 goto close_shadow;
8985 break;
8986
8987 case NOTE_INSN_EPILOGUE_BEG:
8988 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8989 goto close_shadow;
8990 break;
8991 }
8992 }
8993 else if (trap_pending)
8994 {
8995 if (alpha_tp == ALPHA_TP_FUNC)
8996 {
8997 if (JUMP_P (i)
8998 && GET_CODE (PATTERN (i)) == RETURN)
8999 goto close_shadow;
9000 }
9001 else if (alpha_tp == ALPHA_TP_INSN)
9002 {
9003 if (optimize > 0)
9004 {
9005 struct shadow_summary sum;
9006
9007 sum.used.i = 0;
9008 sum.used.fp = 0;
9009 sum.used.mem = 0;
9010 sum.defd = sum.used;
9011
9012 switch (GET_CODE (i))
9013 {
9014 case INSN:
9015 /* Annoyingly, get_attr_trap will die on these. */
9016 if (GET_CODE (PATTERN (i)) == USE
9017 || GET_CODE (PATTERN (i)) == CLOBBER)
9018 break;
9019
9020 summarize_insn (PATTERN (i), &sum, 0);
9021
9022 if ((sum.defd.i & shadow.defd.i)
9023 || (sum.defd.fp & shadow.defd.fp))
9024 {
9025 /* (c) would be violated */
9026 goto close_shadow;
9027 }
9028
9029 /* Combine shadow with summary of current insn: */
9030 shadow.used.i |= sum.used.i;
9031 shadow.used.fp |= sum.used.fp;
9032 shadow.used.mem |= sum.used.mem;
9033 shadow.defd.i |= sum.defd.i;
9034 shadow.defd.fp |= sum.defd.fp;
9035 shadow.defd.mem |= sum.defd.mem;
9036
9037 if ((sum.defd.i & shadow.used.i)
9038 || (sum.defd.fp & shadow.used.fp)
9039 || (sum.defd.mem & shadow.used.mem))
9040 {
9041 /* (a) would be violated (also takes care of (b)) */
9042 gcc_assert (get_attr_trap (i) != TRAP_YES
9043 || (!(sum.defd.i & sum.used.i)
9044 && !(sum.defd.fp & sum.used.fp)));
9045
9046 goto close_shadow;
9047 }
9048 break;
9049
9050 case JUMP_INSN:
9051 case CALL_INSN:
9052 case CODE_LABEL:
9053 goto close_shadow;
9054
9055 default:
9056 gcc_unreachable ();
9057 }
9058 }
9059 else
9060 {
9061 close_shadow:
9062 n = emit_insn_before (gen_trapb (), i);
9063 PUT_MODE (n, TImode);
9064 PUT_MODE (i, TImode);
9065 trap_pending = 0;
9066 shadow.used.i = 0;
9067 shadow.used.fp = 0;
9068 shadow.used.mem = 0;
9069 shadow.defd = shadow.used;
9070 }
9071 }
9072 }
9073
9074 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
9075 && NONJUMP_INSN_P (i)
9076 && GET_CODE (PATTERN (i)) != USE
9077 && GET_CODE (PATTERN (i)) != CLOBBER
9078 && get_attr_trap (i) == TRAP_YES)
9079 {
9080 if (optimize && !trap_pending)
9081 summarize_insn (PATTERN (i), &shadow, 0);
9082 trap_pending = 1;
9083 }
9084 }
9085 }
9086 \f
9087 /* Alpha can only issue instruction groups simultaneously if they are
9088 suitably aligned. This is very processor-specific. */
9089 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
9090 that are marked "fake". These instructions do not exist on that target,
9091 but it is possible to see these insns with deranged combinations of
9092 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
9093 choose a result at random. */
9094
9095 enum alphaev4_pipe {
9096 EV4_STOP = 0,
9097 EV4_IB0 = 1,
9098 EV4_IB1 = 2,
9099 EV4_IBX = 4
9100 };
9101
9102 enum alphaev5_pipe {
9103 EV5_STOP = 0,
9104 EV5_NONE = 1,
9105 EV5_E01 = 2,
9106 EV5_E0 = 4,
9107 EV5_E1 = 8,
9108 EV5_FAM = 16,
9109 EV5_FA = 32,
9110 EV5_FM = 64
9111 };
9112
9113 static enum alphaev4_pipe
9114 alphaev4_insn_pipe (rtx insn)
9115 {
9116 if (recog_memoized (insn) < 0)
9117 return EV4_STOP;
9118 if (get_attr_length (insn) != 4)
9119 return EV4_STOP;
9120
9121 switch (get_attr_type (insn))
9122 {
9123 case TYPE_ILD:
9124 case TYPE_LDSYM:
9125 case TYPE_FLD:
9126 case TYPE_LD_L:
9127 return EV4_IBX;
9128
9129 case TYPE_IADD:
9130 case TYPE_ILOG:
9131 case TYPE_ICMOV:
9132 case TYPE_ICMP:
9133 case TYPE_FST:
9134 case TYPE_SHIFT:
9135 case TYPE_IMUL:
9136 case TYPE_FBR:
9137 case TYPE_MVI: /* fake */
9138 return EV4_IB0;
9139
9140 case TYPE_IST:
9141 case TYPE_MISC:
9142 case TYPE_IBR:
9143 case TYPE_JSR:
9144 case TYPE_CALLPAL:
9145 case TYPE_FCPYS:
9146 case TYPE_FCMOV:
9147 case TYPE_FADD:
9148 case TYPE_FDIV:
9149 case TYPE_FMUL:
9150 case TYPE_ST_C:
9151 case TYPE_MB:
9152 case TYPE_FSQRT: /* fake */
9153 case TYPE_FTOI: /* fake */
9154 case TYPE_ITOF: /* fake */
9155 return EV4_IB1;
9156
9157 default:
9158 gcc_unreachable ();
9159 }
9160 }
9161
9162 static enum alphaev5_pipe
9163 alphaev5_insn_pipe (rtx insn)
9164 {
9165 if (recog_memoized (insn) < 0)
9166 return EV5_STOP;
9167 if (get_attr_length (insn) != 4)
9168 return EV5_STOP;
9169
9170 switch (get_attr_type (insn))
9171 {
9172 case TYPE_ILD:
9173 case TYPE_FLD:
9174 case TYPE_LDSYM:
9175 case TYPE_IADD:
9176 case TYPE_ILOG:
9177 case TYPE_ICMOV:
9178 case TYPE_ICMP:
9179 return EV5_E01;
9180
9181 case TYPE_IST:
9182 case TYPE_FST:
9183 case TYPE_SHIFT:
9184 case TYPE_IMUL:
9185 case TYPE_MISC:
9186 case TYPE_MVI:
9187 case TYPE_LD_L:
9188 case TYPE_ST_C:
9189 case TYPE_MB:
9190 case TYPE_FTOI: /* fake */
9191 case TYPE_ITOF: /* fake */
9192 return EV5_E0;
9193
9194 case TYPE_IBR:
9195 case TYPE_JSR:
9196 case TYPE_CALLPAL:
9197 return EV5_E1;
9198
9199 case TYPE_FCPYS:
9200 return EV5_FAM;
9201
9202 case TYPE_FBR:
9203 case TYPE_FCMOV:
9204 case TYPE_FADD:
9205 case TYPE_FDIV:
9206 case TYPE_FSQRT: /* fake */
9207 return EV5_FA;
9208
9209 case TYPE_FMUL:
9210 return EV5_FM;
9211
9212 default:
9213 gcc_unreachable ();
9214 }
9215 }
9216
9217 /* IN_USE is a mask of the slots currently filled within the insn group.
9218 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9219 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9220
9221 LEN is, of course, the length of the group in bytes. */
9222
9223 static rtx
9224 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9225 {
9226 int len, in_use;
9227
9228 len = in_use = 0;
9229
9230 if (! INSN_P (insn)
9231 || GET_CODE (PATTERN (insn)) == CLOBBER
9232 || GET_CODE (PATTERN (insn)) == USE)
9233 goto next_and_done;
9234
9235 while (1)
9236 {
9237 enum alphaev4_pipe pipe;
9238
9239 pipe = alphaev4_insn_pipe (insn);
9240 switch (pipe)
9241 {
9242 case EV4_STOP:
9243 /* Force complex instructions to start new groups. */
9244 if (in_use)
9245 goto done;
9246
9247 /* If this is a completely unrecognized insn, it's an asm.
9248 We don't know how long it is, so record length as -1 to
9249 signal a needed realignment. */
9250 if (recog_memoized (insn) < 0)
9251 len = -1;
9252 else
9253 len = get_attr_length (insn);
9254 goto next_and_done;
9255
9256 case EV4_IBX:
9257 if (in_use & EV4_IB0)
9258 {
9259 if (in_use & EV4_IB1)
9260 goto done;
9261 in_use |= EV4_IB1;
9262 }
9263 else
9264 in_use |= EV4_IB0 | EV4_IBX;
9265 break;
9266
9267 case EV4_IB0:
9268 if (in_use & EV4_IB0)
9269 {
9270 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9271 goto done;
9272 in_use |= EV4_IB1;
9273 }
9274 in_use |= EV4_IB0;
9275 break;
9276
9277 case EV4_IB1:
9278 if (in_use & EV4_IB1)
9279 goto done;
9280 in_use |= EV4_IB1;
9281 break;
9282
9283 default:
9284 gcc_unreachable ();
9285 }
9286 len += 4;
9287
9288 /* Haifa doesn't do well scheduling branches. */
9289 if (JUMP_P (insn))
9290 goto next_and_done;
9291
9292 next:
9293 insn = next_nonnote_insn (insn);
9294
9295 if (!insn || ! INSN_P (insn))
9296 goto done;
9297
9298 /* Let Haifa tell us where it thinks insn group boundaries are. */
9299 if (GET_MODE (insn) == TImode)
9300 goto done;
9301
9302 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9303 goto next;
9304 }
9305
9306 next_and_done:
9307 insn = next_nonnote_insn (insn);
9308
9309 done:
9310 *plen = len;
9311 *pin_use = in_use;
9312 return insn;
9313 }
9314
9315 /* IN_USE is a mask of the slots currently filled within the insn group.
9316 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9317 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9318
9319 LEN is, of course, the length of the group in bytes. */
9320
9321 static rtx
9322 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9323 {
9324 int len, in_use;
9325
9326 len = in_use = 0;
9327
9328 if (! INSN_P (insn)
9329 || GET_CODE (PATTERN (insn)) == CLOBBER
9330 || GET_CODE (PATTERN (insn)) == USE)
9331 goto next_and_done;
9332
9333 while (1)
9334 {
9335 enum alphaev5_pipe pipe;
9336
9337 pipe = alphaev5_insn_pipe (insn);
9338 switch (pipe)
9339 {
9340 case EV5_STOP:
9341 /* Force complex instructions to start new groups. */
9342 if (in_use)
9343 goto done;
9344
9345 /* If this is a completely unrecognized insn, it's an asm.
9346 We don't know how long it is, so record length as -1 to
9347 signal a needed realignment. */
9348 if (recog_memoized (insn) < 0)
9349 len = -1;
9350 else
9351 len = get_attr_length (insn);
9352 goto next_and_done;
9353
9354 /* ??? For most of the cases below, we would like to assert that
9355 they never happen, as that would indicate an error either in
9356 Haifa or in the scheduling description. Unfortunately, Haifa
9357 never schedules the last instruction of the BB, so we don't
9358 have an accurate TI bit to rely on. */
9359 case EV5_E01:
9360 if (in_use & EV5_E0)
9361 {
9362 if (in_use & EV5_E1)
9363 goto done;
9364 in_use |= EV5_E1;
9365 }
9366 else
9367 in_use |= EV5_E0 | EV5_E01;
9368 break;
9369
9370 case EV5_E0:
9371 if (in_use & EV5_E0)
9372 {
9373 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9374 goto done;
9375 in_use |= EV5_E1;
9376 }
9377 in_use |= EV5_E0;
9378 break;
9379
9380 case EV5_E1:
9381 if (in_use & EV5_E1)
9382 goto done;
9383 in_use |= EV5_E1;
9384 break;
9385
9386 case EV5_FAM:
9387 if (in_use & EV5_FA)
9388 {
9389 if (in_use & EV5_FM)
9390 goto done;
9391 in_use |= EV5_FM;
9392 }
9393 else
9394 in_use |= EV5_FA | EV5_FAM;
9395 break;
9396
9397 case EV5_FA:
9398 if (in_use & EV5_FA)
9399 goto done;
9400 in_use |= EV5_FA;
9401 break;
9402
9403 case EV5_FM:
9404 if (in_use & EV5_FM)
9405 goto done;
9406 in_use |= EV5_FM;
9407 break;
9408
9409 case EV5_NONE:
9410 break;
9411
9412 default:
9413 gcc_unreachable ();
9414 }
9415 len += 4;
9416
9417 /* Haifa doesn't do well scheduling branches. */
9418 /* ??? If this is predicted not-taken, slotting continues, except
9419 that no more IBR, FBR, or JSR insns may be slotted. */
9420 if (JUMP_P (insn))
9421 goto next_and_done;
9422
9423 next:
9424 insn = next_nonnote_insn (insn);
9425
9426 if (!insn || ! INSN_P (insn))
9427 goto done;
9428
9429 /* Let Haifa tell us where it thinks insn group boundaries are. */
9430 if (GET_MODE (insn) == TImode)
9431 goto done;
9432
9433 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9434 goto next;
9435 }
9436
9437 next_and_done:
9438 insn = next_nonnote_insn (insn);
9439
9440 done:
9441 *plen = len;
9442 *pin_use = in_use;
9443 return insn;
9444 }
9445
9446 static rtx
9447 alphaev4_next_nop (int *pin_use)
9448 {
9449 int in_use = *pin_use;
9450 rtx nop;
9451
9452 if (!(in_use & EV4_IB0))
9453 {
9454 in_use |= EV4_IB0;
9455 nop = gen_nop ();
9456 }
9457 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9458 {
9459 in_use |= EV4_IB1;
9460 nop = gen_nop ();
9461 }
9462 else if (TARGET_FP && !(in_use & EV4_IB1))
9463 {
9464 in_use |= EV4_IB1;
9465 nop = gen_fnop ();
9466 }
9467 else
9468 nop = gen_unop ();
9469
9470 *pin_use = in_use;
9471 return nop;
9472 }
9473
9474 static rtx
9475 alphaev5_next_nop (int *pin_use)
9476 {
9477 int in_use = *pin_use;
9478 rtx nop;
9479
9480 if (!(in_use & EV5_E1))
9481 {
9482 in_use |= EV5_E1;
9483 nop = gen_nop ();
9484 }
9485 else if (TARGET_FP && !(in_use & EV5_FA))
9486 {
9487 in_use |= EV5_FA;
9488 nop = gen_fnop ();
9489 }
9490 else if (TARGET_FP && !(in_use & EV5_FM))
9491 {
9492 in_use |= EV5_FM;
9493 nop = gen_fnop ();
9494 }
9495 else
9496 nop = gen_unop ();
9497
9498 *pin_use = in_use;
9499 return nop;
9500 }
9501
9502 /* The instruction group alignment main loop. */
9503
9504 static void
9505 alpha_align_insns (unsigned int max_align,
9506 rtx (*next_group) (rtx, int *, int *),
9507 rtx (*next_nop) (int *))
9508 {
9509 /* ALIGN is the known alignment for the insn group. */
9510 unsigned int align;
9511 /* OFS is the offset of the current insn in the insn group. */
9512 int ofs;
9513 int prev_in_use, in_use, len, ldgp;
9514 rtx i, next;
9515
9516 /* Let shorten branches care for assigning alignments to code labels. */
9517 shorten_branches (get_insns ());
9518
9519 if (align_functions < 4)
9520 align = 4;
9521 else if ((unsigned int) align_functions < max_align)
9522 align = align_functions;
9523 else
9524 align = max_align;
9525
9526 ofs = prev_in_use = 0;
9527 i = get_insns ();
9528 if (NOTE_P (i))
9529 i = next_nonnote_insn (i);
9530
9531 ldgp = alpha_function_needs_gp ? 8 : 0;
9532
9533 while (i)
9534 {
9535 next = (*next_group) (i, &in_use, &len);
9536
9537 /* When we see a label, resync alignment etc. */
9538 if (LABEL_P (i))
9539 {
9540 unsigned int new_align = 1 << label_to_alignment (i);
9541
9542 if (new_align >= align)
9543 {
9544 align = new_align < max_align ? new_align : max_align;
9545 ofs = 0;
9546 }
9547
9548 else if (ofs & (new_align-1))
9549 ofs = (ofs | (new_align-1)) + 1;
9550 gcc_assert (!len);
9551 }
9552
9553 /* Handle complex instructions specially. */
9554 else if (in_use == 0)
9555 {
9556 /* Asms will have length < 0. This is a signal that we have
9557 lost alignment knowledge. Assume, however, that the asm
9558 will not mis-align instructions. */
9559 if (len < 0)
9560 {
9561 ofs = 0;
9562 align = 4;
9563 len = 0;
9564 }
9565 }
9566
9567 /* If the known alignment is smaller than the recognized insn group,
9568 realign the output. */
9569 else if ((int) align < len)
9570 {
9571 unsigned int new_log_align = len > 8 ? 4 : 3;
9572 rtx prev, where;
9573
9574 where = prev = prev_nonnote_insn (i);
9575 if (!where || !LABEL_P (where))
9576 where = i;
9577
9578 /* Can't realign between a call and its gp reload. */
9579 if (! (TARGET_EXPLICIT_RELOCS
9580 && prev && CALL_P (prev)))
9581 {
9582 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9583 align = 1 << new_log_align;
9584 ofs = 0;
9585 }
9586 }
9587
9588 /* We may not insert padding inside the initial ldgp sequence. */
9589 else if (ldgp > 0)
9590 ldgp -= len;
9591
9592 /* If the group won't fit in the same INT16 as the previous,
9593 we need to add padding to keep the group together. Rather
9594 than simply leaving the insn filling to the assembler, we
9595 can make use of the knowledge of what sorts of instructions
9596 were issued in the previous group to make sure that all of
9597 the added nops are really free. */
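/* Worked example (hypothetical numbers): with align = 16, ofs = 8 and an
   incoming group of len = 12, the group would straddle the 16-byte
   boundary, so (align - ofs) / 4 = 2 nops are emitted -- chosen by
   next_nop from the slots left free by the previous group -- and ofs is
   reset to 0 before the new group's length is added. */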
9598 else if (ofs + len > (int) align)
9599 {
9600 int nop_count = (align - ofs) / 4;
9601 rtx where;
9602
9603 /* Insert nops before labels, branches, and calls to truly merge
9604 the execution of the nops with the previous instruction group. */
9605 where = prev_nonnote_insn (i);
9606 if (where)
9607 {
9608 if (LABEL_P (where))
9609 {
9610 rtx where2 = prev_nonnote_insn (where);
9611 if (where2 && JUMP_P (where2))
9612 where = where2;
9613 }
9614 else if (NONJUMP_INSN_P (where))
9615 where = i;
9616 }
9617 else
9618 where = i;
9619
9620 do
9621 emit_insn_before ((*next_nop)(&prev_in_use), where);
9622 while (--nop_count);
9623 ofs = 0;
9624 }
9625
9626 ofs = (ofs + len) & (align - 1);
9627 prev_in_use = in_use;
9628 i = next;
9629 }
9630 }
9631
9632 /* Insert an unop between a noreturn function call and GP load. */
9633
9634 static void
9635 alpha_pad_noreturn (void)
9636 {
9637 rtx insn, next;
9638
9639 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9640 {
9641 if (!CALL_P (insn)
9642 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9643 continue;
9644
9645 next = next_active_insn (insn);
9646
9647 if (next)
9648 {
9649 rtx pat = PATTERN (next);
9650
9651 if (GET_CODE (pat) == SET
9652 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9653 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9654 emit_insn_after (gen_unop (), insn);
9655 }
9656 }
9657 }
9658 \f
9659 /* Machine dependent reorg pass. */
9660
9661 static void
9662 alpha_reorg (void)
9663 {
9664 /* Workaround for a linker error that triggers when an
9665 exception handler immediately follows a noreturn function.
9666
9667 The instruction stream from an object file:
9668
9669 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9670 58: 00 00 ba 27 ldah gp,0(ra)
9671 5c: 00 00 bd 23 lda gp,0(gp)
9672 60: 00 00 7d a7 ldq t12,0(gp)
9673 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9674
9675 was converted in the final link pass to:
9676
9677 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9678 fdb28: 00 00 fe 2f unop
9679 fdb2c: 00 00 fe 2f unop
9680 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9681 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9682
9683 GP load instructions were wrongly cleared by the linker relaxation
9684 pass. This workaround prevents removal of GP loads by inserting
9685 an unop instruction between a noreturn function call and
9686 exception handler prologue. */
9687
9688 if (current_function_has_exception_handlers ())
9689 alpha_pad_noreturn ();
9690
9691 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9692 alpha_handle_trap_shadows ();
9693
9694 /* Due to the number of extra trapb insns, don't bother fixing up
9695 alignment when trap precision is instruction. Moreover, we can
9696 only do our job when sched2 is run. */
9697 if (optimize && !optimize_size
9698 && alpha_tp != ALPHA_TP_INSN
9699 && flag_schedule_insns_after_reload)
9700 {
9701 if (alpha_tune == PROCESSOR_EV4)
9702 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9703 else if (alpha_tune == PROCESSOR_EV5)
9704 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9705 }
9706 }
9707 \f
9708 #if !TARGET_ABI_UNICOSMK
9709
9710 #ifdef HAVE_STAMP_H
9711 #include <stamp.h>
9712 #endif
9713
9714 static void
9715 alpha_file_start (void)
9716 {
9717 #ifdef OBJECT_FORMAT_ELF
9718 /* If emitting dwarf2 debug information, we cannot generate a .file
9719 directive to start the file, as it will conflict with dwarf2out
9720 file numbers. So it's only useful when emitting mdebug output. */
9721 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9722 #endif
9723
9724 default_file_start ();
9725 #ifdef MS_STAMP
9726 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9727 #endif
9728
9729 fputs ("\t.set noreorder\n", asm_out_file);
9730 fputs ("\t.set volatile\n", asm_out_file);
9731 if (!TARGET_ABI_OPEN_VMS)
9732 fputs ("\t.set noat\n", asm_out_file);
9733 if (TARGET_EXPLICIT_RELOCS)
9734 fputs ("\t.set nomacro\n", asm_out_file);
9735 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9736 {
9737 const char *arch;
9738
9739 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9740 arch = "ev6";
9741 else if (TARGET_MAX)
9742 arch = "pca56";
9743 else if (TARGET_BWX)
9744 arch = "ev56";
9745 else if (alpha_cpu == PROCESSOR_EV5)
9746 arch = "ev5";
9747 else
9748 arch = "ev4";
9749
9750 fprintf (asm_out_file, "\t.arch %s\n", arch);
9751 }
9752 }
9753 #endif
9754
9755 #ifdef OBJECT_FORMAT_ELF
9756 /* Since we don't have a .dynbss section, we should not allow global
9757 relocations in the .rodata section. */
9758
9759 static int
9760 alpha_elf_reloc_rw_mask (void)
9761 {
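  /* In this hook's convention (cf. default_reloc_rw_mask), bit 0 stands
     for relocations against local symbols and bit 1 for relocations
     against global symbols; a set bit forces the affected constants into
     a writable section.  Returning 2 keeps globally-relocated data out
     of .rodata even without -fpic.  */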
9762 return flag_pic ? 3 : 2;
9763 }
9764
9765 /* Return a section for X. The only special thing we do here is to
9766 honor small data. */
9767
9768 static section *
9769 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9770 unsigned HOST_WIDE_INT align)
9771 {
9772 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9773 /* ??? Consider using mergeable sdata sections. */
9774 return sdata_section;
9775 else
9776 return default_elf_select_rtx_section (mode, x, align);
9777 }
9778
9779 static unsigned int
9780 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9781 {
9782 unsigned int flags = 0;
9783
9784 if (strcmp (name, ".sdata") == 0
9785 || strncmp (name, ".sdata.", 7) == 0
9786 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9787 || strcmp (name, ".sbss") == 0
9788 || strncmp (name, ".sbss.", 6) == 0
9789 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9790 flags = SECTION_SMALL;
9791
9792 flags |= default_section_type_flags (decl, name, reloc);
9793 return flags;
9794 }
9795 #endif /* OBJECT_FORMAT_ELF */
9796 \f
9797 /* Structure to collect function names for final output in link section. */
9798 /* Note that items marked with GTY can't be ifdef'ed out. */
9799
9800 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9801 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9802
9803 struct GTY(()) alpha_links
9804 {
9805 int num;
9806 const char *target;
9807 rtx linkage;
9808 enum links_kind lkind;
9809 enum reloc_kind rkind;
9810 };
9811
9812 struct GTY(()) alpha_funcs
9813 {
9814 int num;
9815 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9816 links;
9817 };
9818
9819 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9820 splay_tree alpha_links_tree;
9821 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9822 splay_tree alpha_funcs_tree;
9823
9824 static GTY(()) int alpha_funcs_num;
9825
9826 #if TARGET_ABI_OPEN_VMS
9827
9828 /* Return the VMS argument type corresponding to MODE. */
9829
9830 enum avms_arg_type
9831 alpha_arg_type (enum machine_mode mode)
9832 {
9833 switch (mode)
9834 {
9835 case SFmode:
9836 return TARGET_FLOAT_VAX ? FF : FS;
9837 case DFmode:
9838 return TARGET_FLOAT_VAX ? FD : FT;
9839 default:
9840 return I64;
9841 }
9842 }
9843
9844 /* Return an rtx for an integer representing the VMS Argument Information
9845 register value. */
9846
9847 rtx
9848 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9849 {
9850 unsigned HOST_WIDE_INT regval = cum.num_args;
9851 int i;
9852
9853 for (i = 0; i < 6; i++)
9854 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
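  /* Sketch of the resulting layout, derived from the code above rather
     than from VMS documentation: bits 0-7 hold the raw argument count and
     bits 8 + 3*i hold the 3-bit type code of argument i.  A call passing
     (int, double) with IEEE floats would thus yield
     2 | (I64 << 8) | (FT << 11).  */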
9855
9856 return GEN_INT (regval);
9857 }
9858 \f
9859 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9860 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9861 Return a SYMBOL_REF suited to the call instruction. */
9862
9863 rtx
9864 alpha_need_linkage (const char *name, int is_local)
9865 {
9866 splay_tree_node node;
9867 struct alpha_links *al;
9868 const char *target;
9869 tree id;
9870
9871 if (name[0] == '*')
9872 name++;
9873
9874 if (is_local)
9875 {
9876 struct alpha_funcs *cfaf;
9877
9878 if (!alpha_funcs_tree)
9879 alpha_funcs_tree = splay_tree_new_ggc
9880 (splay_tree_compare_pointers,
9881 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9882 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9883
9884
9885 cfaf = ggc_alloc_alpha_funcs ();
9886
9887 cfaf->links = 0;
9888 cfaf->num = ++alpha_funcs_num;
9889
9890 splay_tree_insert (alpha_funcs_tree,
9891 (splay_tree_key) current_function_decl,
9892 (splay_tree_value) cfaf);
9893 }
9894
9895 if (alpha_links_tree)
9896 {
9897 /* Is this name already defined? */
9898
9899 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9900 if (node)
9901 {
9902 al = (struct alpha_links *) node->value;
9903 if (is_local)
9904 {
9905 /* Defined here but external assumed. */
9906 if (al->lkind == KIND_EXTERN)
9907 al->lkind = KIND_LOCAL;
9908 }
9909 else
9910 {
9911 /* Used here but unused assumed. */
9912 if (al->lkind == KIND_UNUSED)
9913 al->lkind = KIND_LOCAL;
9914 }
9915 return al->linkage;
9916 }
9917 }
9918 else
9919 alpha_links_tree = splay_tree_new_ggc
9920 ((splay_tree_compare_fn) strcmp,
9921 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9922 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9923
9924 al = ggc_alloc_alpha_links ();
9925 name = ggc_strdup (name);
9926
9927 /* Assume external if no definition. */
9928 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9929
9930 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9931      and find the ultimate alias target, just as assemble_name would.  */
9932 id = get_identifier (name);
9933 target = NULL;
9934 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9935 {
9936 id = TREE_CHAIN (id);
9937 target = IDENTIFIER_POINTER (id);
9938 }
9939
9940 al->target = target ? target : name;
9941 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9942
9943 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9944 (splay_tree_value) al);
9945
9946 return al->linkage;
9947 }
9948
9949 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9950 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9951 this is the reference to the linkage pointer value, 0 if this is the
9952    reference to the function entry value.  RFLAG is 1 if this is a reduced
9953 reference (code address only), 0 if this is a full reference. */
9954
9955 rtx
9956 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9957 {
9958 splay_tree_node cfunnode;
9959 struct alpha_funcs *cfaf;
9960 struct alpha_links *al;
9961 const char *name = XSTR (func, 0);
9962
9963 cfaf = (struct alpha_funcs *) 0;
9964 al = (struct alpha_links *) 0;
9965
9966 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9967 cfaf = (struct alpha_funcs *) cfunnode->value;
9968
9969 if (cfaf->links)
9970 {
9971 splay_tree_node lnode;
9972
9973 /* Is this name already defined? */
9974
9975 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9976 if (lnode)
9977 al = (struct alpha_links *) lnode->value;
9978 }
9979 else
9980 cfaf->links = splay_tree_new_ggc
9981 ((splay_tree_compare_fn) strcmp,
9982 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9983 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9984
9985 if (!al)
9986 {
9987 size_t name_len;
9988 size_t buflen;
9989 char *linksym;
9990 splay_tree_node node = 0;
9991 struct alpha_links *anl;
9992
9993 if (name[0] == '*')
9994 name++;
9995
9996 name_len = strlen (name);
9997 linksym = (char *) alloca (name_len + 50);
9998
9999 al = ggc_alloc_alpha_links ();
10000 al->num = cfaf->num;
10001 al->target = NULL;
10002
10003 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
10004 if (node)
10005 {
10006 anl = (struct alpha_links *) node->value;
10007 al->lkind = anl->lkind;
10008 name = anl->target;
10009 }
10010
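      /* E.g. for a call made from the file's second function to FOO this
	 builds the local symbol name "$2..FOO..lk".  */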
10011 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
10012 buflen = strlen (linksym);
10013
10014 al->linkage = gen_rtx_SYMBOL_REF
10015 (Pmode, ggc_alloc_string (linksym, buflen + 1));
10016
10017 splay_tree_insert (cfaf->links, (splay_tree_key) name,
10018 (splay_tree_value) al);
10019 }
10020
10021 if (rflag)
10022 al->rkind = KIND_CODEADDR;
10023 else
10024 al->rkind = KIND_LINKAGE;
10025
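  /* alpha_write_one_linkage emits the pair as the entry point at offset 0
     followed by the procedure value at offset 8, hence the +8 below for
     the linkage-pointer reference.  */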
10026 if (lflag)
10027 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
10028 else
10029 return al->linkage;
10030 }
10031
10032 static int
10033 alpha_write_one_linkage (splay_tree_node node, void *data)
10034 {
10035 const char *const name = (const char *) node->key;
10036 struct alpha_links *link = (struct alpha_links *) node->value;
10037 FILE *stream = (FILE *) data;
10038
10039 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
10040 if (link->rkind == KIND_CODEADDR)
10041 {
10042 if (link->lkind == KIND_LOCAL)
10043 {
10044 /* Local and used */
10045 fprintf (stream, "\t.quad %s..en\n", name);
10046 }
10047 else
10048 {
10049 /* External and used, request code address. */
10050 fprintf (stream, "\t.code_address %s\n", name);
10051 }
10052 }
10053 else
10054 {
10055 if (link->lkind == KIND_LOCAL)
10056 {
10057 /* Local and used, build linkage pair. */
10058 fprintf (stream, "\t.quad %s..en\n", name);
10059 fprintf (stream, "\t.quad %s\n", name);
10060 }
10061 else
10062 {
10063 /* External and used, request linkage pair. */
10064 fprintf (stream, "\t.linkage %s\n", name);
10065 }
10066 }
10067
10068 return 0;
10069 }
10070
10071 static void
10072 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
10073 {
10074 splay_tree_node node;
10075 struct alpha_funcs *func;
10076
10077 fprintf (stream, "\t.link\n");
10078 fprintf (stream, "\t.align 3\n");
10079 in_section = NULL;
10080
10081 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
10082 func = (struct alpha_funcs *) node->value;
10083
10084 fputs ("\t.name ", stream);
10085 assemble_name (stream, funname);
10086 fputs ("..na\n", stream);
10087 ASM_OUTPUT_LABEL (stream, funname);
10088 fprintf (stream, "\t.pdesc ");
10089 assemble_name (stream, funname);
10090 fprintf (stream, "..en,%s\n",
10091 alpha_procedure_type == PT_STACK ? "stack"
10092 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
10093
10094 if (func->links)
10095 {
10096 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
10097 /* splay_tree_delete (func->links); */
10098 }
10099 }
10100
10101 /* Switch to an arbitrary section NAME with attributes as specified
10102 by FLAGS. ALIGN specifies any known alignment requirements for
10103 the section; 0 if the default should be used. */
10104
10105 static void
10106 vms_asm_named_section (const char *name, unsigned int flags,
10107 tree decl ATTRIBUTE_UNUSED)
10108 {
10109 fputc ('\n', asm_out_file);
10110 fprintf (asm_out_file, ".section\t%s", name);
10111
10112 if (flags & SECTION_DEBUG)
10113 fprintf (asm_out_file, ",NOWRT");
10114
10115 fputc ('\n', asm_out_file);
10116 }
10117
10118 /* Record an element in the table of global constructors. SYMBOL is
10119 a SYMBOL_REF of the function to be called; PRIORITY is a number
10120 between 0 and MAX_INIT_PRIORITY.
10121
10122 Differs from default_ctors_section_asm_out_constructor in that the
10123 width of the .ctors entry is always 64 bits, rather than the 32 bits
10124 used by a normal pointer. */
10125
10126 static void
10127 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10128 {
10129 switch_to_section (ctors_section);
10130 assemble_align (BITS_PER_WORD);
10131 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10132 }
10133
10134 static void
10135 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10136 {
10137 switch_to_section (dtors_section);
10138 assemble_align (BITS_PER_WORD);
10139 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10140 }
10141 #else
10142
10143 rtx
10144 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
10145 int is_local ATTRIBUTE_UNUSED)
10146 {
10147 return NULL_RTX;
10148 }
10149
10150 rtx
10151 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
10152 tree cfundecl ATTRIBUTE_UNUSED,
10153 int lflag ATTRIBUTE_UNUSED,
10154 int rflag ATTRIBUTE_UNUSED)
10155 {
10156 return NULL_RTX;
10157 }
10158
10159 #endif /* TARGET_ABI_OPEN_VMS */
10160 \f
10161 #if TARGET_ABI_UNICOSMK
10162
10163 /* This evaluates to true if we do not know how to pass TYPE solely in
10164 registers. This is the case for all arguments that do not fit in two
10165 registers. */
10166
10167 static bool
10168 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
10169 {
10170 if (type == NULL)
10171 return false;
10172
10173 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10174 return true;
10175 if (TREE_ADDRESSABLE (type))
10176 return true;
10177
10178 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
10179 }
10180
10181 /* Define the offset between two registers, one to be eliminated, and the
10182 other its replacement, at the start of a routine. */
10183
10184 int
10185 unicosmk_initial_elimination_offset (int from, int to)
10186 {
10187 int fixed_size;
10188
10189 fixed_size = alpha_sa_size();
10190 if (fixed_size != 0)
10191 fixed_size += 48;
10192
10193 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10194 return -fixed_size;
10195 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10196 return 0;
10197 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10198 return (ALPHA_ROUND (crtl->outgoing_args_size)
10199 + ALPHA_ROUND (get_frame_size()));
10200 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10201 return (ALPHA_ROUND (fixed_size)
10202 + ALPHA_ROUND (get_frame_size()
10203 + crtl->outgoing_args_size));
10204 else
10205 gcc_unreachable ();
10206 }
10207
10208 /* Output the module name for .ident and .end directives. We have to strip
10209    directories and make sure that the module name starts with a letter
10210 or '$'. */
10211
10212 static void
10213 unicosmk_output_module_name (FILE *file)
10214 {
10215 const char *name = lbasename (main_input_filename);
10216 unsigned len = strlen (name);
10217 char *clean_name = alloca (len + 2);
10218 char *ptr = clean_name;
10219
10220 /* CAM only accepts module names that start with a letter or '$'. We
10221 prefix the module name with a '$' if necessary. */
10222
10223 if (!ISALPHA (*name))
10224 *ptr++ = '$';
10225 memcpy (ptr, name, len + 1);
10226 clean_symbol_name (clean_name);
10227 fputs (clean_name, file);
10228 }
10229
10230 /* Output the definition of a common variable. */
10231
10232 void
10233 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10234 {
10235 tree name_tree;
10236 printf ("T3E__: common %s\n", name);
10237
10238 in_section = NULL;
10239 fputs("\t.endp\n\n\t.psect ", file);
10240 assemble_name(file, name);
10241 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10242 fprintf(file, "\t.byte\t0:%d\n", size);
10243
10244 /* Mark the symbol as defined in this module. */
10245 name_tree = get_identifier (name);
10246 TREE_ASM_WRITTEN (name_tree) = 1;
10247 }
10248
10249 #define SECTION_PUBLIC SECTION_MACH_DEP
10250 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10251 static int current_section_align;
10252
10253 /* A get_unnamed_section callback for switching to the text section. */
10254
10255 static void
10256 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10257 {
10258 static int count = 0;
10259 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10260 }
10261
10262 /* A get_unnamed_section callback for switching to the data section. */
10263
10264 static void
10265 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10266 {
10267 static int count = 1;
10268 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10269 }
10270
10271 /* Implement TARGET_ASM_INIT_SECTIONS.
10272
10273 The Cray assembler is really weird with respect to sections. It has only
10274 named sections and you can't reopen a section once it has been closed.
10275 This means that we have to generate unique names whenever we want to
10276 reenter the text or the data section. */
10277
10278 static void
10279 unicosmk_init_sections (void)
10280 {
10281 text_section = get_unnamed_section (SECTION_CODE,
10282 unicosmk_output_text_section_asm_op,
10283 NULL);
10284 data_section = get_unnamed_section (SECTION_WRITE,
10285 unicosmk_output_data_section_asm_op,
10286 NULL);
10287 readonly_data_section = data_section;
10288 }
10289
10290 static unsigned int
10291 unicosmk_section_type_flags (tree decl, const char *name,
10292 int reloc ATTRIBUTE_UNUSED)
10293 {
10294 unsigned int flags = default_section_type_flags (decl, name, reloc);
10295
10296 if (!decl)
10297 return flags;
10298
10299 if (TREE_CODE (decl) == FUNCTION_DECL)
10300 {
10301 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10302 if (align_functions_log > current_section_align)
10303 current_section_align = align_functions_log;
10304
10305 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10306 flags |= SECTION_MAIN;
10307 }
10308 else
10309 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10310
10311 if (TREE_PUBLIC (decl))
10312 flags |= SECTION_PUBLIC;
10313
10314 return flags;
10315 }
10316
10317 /* Generate a section name for decl and associate it with the
10318 declaration. */
10319
10320 static void
10321 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10322 {
10323 const char *name;
10324 int len;
10325
10326 gcc_assert (decl);
10327
10328 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10329 name = default_strip_name_encoding (name);
10330 len = strlen (name);
10331
10332 if (TREE_CODE (decl) == FUNCTION_DECL)
10333 {
10334 char *string;
10335
10336 /* It is essential that we prefix the section name here because
10337 otherwise the section names generated for constructors and
10338 destructors confuse collect2. */
10339
10340 string = alloca (len + 6);
10341 sprintf (string, "code@%s", name);
10342 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10343 }
10344 else if (TREE_PUBLIC (decl))
10345 DECL_SECTION_NAME (decl) = build_string (len, name);
10346 else
10347 {
10348 char *string;
10349
10350 string = alloca (len + 6);
10351 sprintf (string, "data@%s", name);
10352 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10353 }
10354 }
10355
10356 /* Switch to an arbitrary section NAME with attributes as specified
10357 by FLAGS. ALIGN specifies any known alignment requirements for
10358 the section; 0 if the default should be used. */
10359
10360 static void
10361 unicosmk_asm_named_section (const char *name, unsigned int flags,
10362 tree decl ATTRIBUTE_UNUSED)
10363 {
10364 const char *kind;
10365
10366 /* Close the previous section. */
10367
10368 fputs ("\t.endp\n\n", asm_out_file);
10369
10370 /* Find out what kind of section we are opening. */
10371
10372 if (flags & SECTION_MAIN)
10373 fputs ("\t.start\tmain\n", asm_out_file);
10374
10375 if (flags & SECTION_CODE)
10376 kind = "code";
10377 else if (flags & SECTION_PUBLIC)
10378 kind = "common";
10379 else
10380 kind = "data";
10381
10382 if (current_section_align != 0)
10383 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10384 current_section_align, kind);
10385 else
10386 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10387 }
10388
10389 static void
10390 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10391 {
10392 if (DECL_P (decl)
10393 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10394 unicosmk_unique_section (decl, 0);
10395 }
10396
10397 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10398    in code sections because .align fills unused space with zeroes.  */
10399
10400 void
10401 unicosmk_output_align (FILE *file, int align)
10402 {
10403 if (inside_function)
10404 fprintf (file, "\tgcc@code@align\t%d\n", align);
10405 else
10406 fprintf (file, "\t.align\t%d\n", align);
10407 }
10408
10409 /* Add a case vector to the current function's list of deferred case
10410 vectors. Case vectors have to be put into a separate section because CAM
10411 does not allow data definitions in code sections. */
10412
10413 void
10414 unicosmk_defer_case_vector (rtx lab, rtx vec)
10415 {
10416 struct machine_function *machine = cfun->machine;
10417
10418 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10419 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10420 machine->addr_list);
10421 }
10422
10423 /* Output a case vector. */
10424
10425 static void
10426 unicosmk_output_addr_vec (FILE *file, rtx vec)
10427 {
10428 rtx lab = XEXP (vec, 0);
10429 rtx body = XEXP (vec, 1);
10430 int vlen = XVECLEN (body, 0);
10431 int idx;
10432
10433 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10434
10435 for (idx = 0; idx < vlen; idx++)
10436 {
10437 ASM_OUTPUT_ADDR_VEC_ELT
10438 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10439 }
10440 }
10441
10442 /* Output current function's deferred case vectors. */
10443
10444 static void
10445 unicosmk_output_deferred_case_vectors (FILE *file)
10446 {
10447 struct machine_function *machine = cfun->machine;
10448 rtx t;
10449
10450 if (machine->addr_list == NULL_RTX)
10451 return;
10452
10453 switch_to_section (data_section);
10454 for (t = machine->addr_list; t; t = XEXP (t, 1))
10455 unicosmk_output_addr_vec (file, XEXP (t, 0));
10456 }
10457
10458 /* Generate the name of the SSIB section for the current function. */
10459
10460 #define SSIB_PREFIX "__SSIB_"
10461 #define SSIB_PREFIX_LEN 7
10462
10463 static const char *
10464 unicosmk_ssib_name (void)
10465 {
10466 /* This is ok since CAM won't be able to deal with names longer than that
10467 anyway. */
10468
10469 static char name[256];
10470
10471 rtx x;
10472 const char *fnname;
10473 int len;
10474
10475 x = DECL_RTL (cfun->decl);
10476 gcc_assert (MEM_P (x));
10477 x = XEXP (x, 0);
10478 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10479 fnname = XSTR (x, 0);
10480
10481 len = strlen (fnname);
10482 if (len + SSIB_PREFIX_LEN > 255)
10483 len = 255 - SSIB_PREFIX_LEN;
10484
10485 strcpy (name, SSIB_PREFIX);
10486 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10487 name[len + SSIB_PREFIX_LEN] = 0;
10488
10489 return name;
10490 }
10491
10492 /* Set up the dynamic subprogram information block (DSIB) and update the
10493 frame pointer register ($15) for subroutines which have a frame. If the
10494 subroutine doesn't have a frame, simply increment $15. */
10495
10496 static void
10497 unicosmk_gen_dsib (unsigned long *imaskP)
10498 {
10499 if (alpha_procedure_type == PT_STACK)
10500 {
10501 const char *ssib_name;
10502 rtx mem;
10503
10504 /* Allocate 64 bytes for the DSIB. */
10505
10506 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10507 GEN_INT (-64))));
10508 emit_insn (gen_blockage ());
10509
10510 /* Save the return address. */
10511
10512 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10513 set_mem_alias_set (mem, alpha_sr_alias_set);
10514 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10515 (*imaskP) &= ~(1UL << REG_RA);
10516
10517 /* Save the old frame pointer. */
10518
10519 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10520 set_mem_alias_set (mem, alpha_sr_alias_set);
10521 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10522 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10523
10524 emit_insn (gen_blockage ());
10525
10526 /* Store the SSIB pointer. */
10527
10528 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10529 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10530 set_mem_alias_set (mem, alpha_sr_alias_set);
10531
10532 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10533 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10534 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10535
10536 /* Save the CIW index. */
10537
10538 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10539 set_mem_alias_set (mem, alpha_sr_alias_set);
10540 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10541
10542 emit_insn (gen_blockage ());
10543
10544 /* Set the new frame pointer. */
10545 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10546 stack_pointer_rtx, GEN_INT (64))));
10547 }
10548 else
10549 {
10550 /* Increment the frame pointer register to indicate that we do not
10551 have a frame. */
10552 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10553 hard_frame_pointer_rtx, const1_rtx));
10554 }
10555 }
10556
10557 /* Output the static subroutine information block for the current
10558 function. */
10559
10560 static void
10561 unicosmk_output_ssib (FILE *file, const char *fnname)
10562 {
10563 int len;
10564 int i;
10565 rtx x;
10566 rtx ciw;
10567 struct machine_function *machine = cfun->machine;
10568
10569 in_section = NULL;
10570 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10571 unicosmk_ssib_name ());
10572
10573 /* Some required stuff and the function name length. */
10574
10575 len = strlen (fnname);
10576 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10577
10578 /* Saved registers
10579 ??? We don't do that yet. */
10580
10581 fputs ("\t.quad\t0\n", file);
10582
10583 /* Function address. */
10584
10585 fputs ("\t.quad\t", file);
10586 assemble_name (file, fnname);
10587 putc ('\n', file);
10588
10589 fputs ("\t.quad\t0\n", file);
10590 fputs ("\t.quad\t0\n", file);
10591
10592 /* Function name.
10593 ??? We do it the same way Cray CC does it but this could be
10594 simplified. */
10595
10596 for( i = 0; i < len; i++ )
10597 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10598 if( (len % 8) == 0 )
10599 fputs ("\t.quad\t0\n", file);
10600 else
10601 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10602
10603 /* All call information words used in the function. */
10604
10605 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10606 {
10607 ciw = XEXP (x, 0);
10608 #if HOST_BITS_PER_WIDE_INT == 32
10609 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10610 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10611 #else
10612 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10613 #endif
10614 }
10615 }
10616
10617 /* Add a call information word (CIW) to the list of the current function's
10618 CIWs and return its index.
10619
10620 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10621
10622 rtx
10623 unicosmk_add_call_info_word (rtx x)
10624 {
10625 rtx node;
10626 struct machine_function *machine = cfun->machine;
10627
10628 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10629 if (machine->first_ciw == NULL_RTX)
10630 machine->first_ciw = node;
10631 else
10632 XEXP (machine->last_ciw, 1) = node;
10633
10634 machine->last_ciw = node;
10635 ++machine->ciw_count;
10636
10637 return GEN_INT (machine->ciw_count
10638 + strlen (current_function_name ())/8 + 5);
10639 }
10640
10641 /* The Cray assembler doesn't accept extern declarations for symbols which
10642 are defined in the same file. We have to keep track of all global
10643 symbols which are referenced and/or defined in a source file and output
10644 extern declarations for those which are referenced but not defined at
10645    the end of the file.  */
10646
10647 /* List of identifiers for which an extern declaration might have to be
10648 emitted. */
10649 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10650
10651 struct unicosmk_extern_list
10652 {
10653 struct unicosmk_extern_list *next;
10654 const char *name;
10655 };
10656
10657 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10658
10659 /* Output extern declarations which are required for every asm file. */
10660
10661 static void
10662 unicosmk_output_default_externs (FILE *file)
10663 {
10664 static const char *const externs[] =
10665 { "__T3E_MISMATCH" };
10666
10667 int i;
10668 int n;
10669
10670 n = ARRAY_SIZE (externs);
10671
10672 for (i = 0; i < n; i++)
10673 fprintf (file, "\t.extern\t%s\n", externs[i]);
10674 }
10675
10676 /* Output extern declarations for global symbols which have been
10677 referenced but not defined. */
10678
10679 static void
10680 unicosmk_output_externs (FILE *file)
10681 {
10682 struct unicosmk_extern_list *p;
10683 const char *real_name;
10684 int len;
10685 tree name_tree;
10686
10687 len = strlen (user_label_prefix);
10688 for (p = unicosmk_extern_head; p != 0; p = p->next)
10689 {
10690 /* We have to strip the encoding and possibly remove user_label_prefix
10691 from the identifier in order to handle -fleading-underscore and
10692 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10693 real_name = default_strip_name_encoding (p->name);
10694 if (len && p->name[0] == '*'
10695 && !memcmp (real_name, user_label_prefix, len))
10696 real_name += len;
10697
10698 name_tree = get_identifier (real_name);
10699 if (! TREE_ASM_WRITTEN (name_tree))
10700 {
10701 TREE_ASM_WRITTEN (name_tree) = 1;
10702 fputs ("\t.extern\t", file);
10703 assemble_name (file, p->name);
10704 putc ('\n', file);
10705 }
10706 }
10707 }
10708
10709 /* Record an extern. */
10710
10711 void
10712 unicosmk_add_extern (const char *name)
10713 {
10714 struct unicosmk_extern_list *p;
10715
10716 p = (struct unicosmk_extern_list *)
10717 xmalloc (sizeof (struct unicosmk_extern_list));
10718 p->next = unicosmk_extern_head;
10719 p->name = name;
10720 unicosmk_extern_head = p;
10721 }
10722
10723 /* The Cray assembler generates incorrect code if identifiers which
10724 conflict with register names are used as instruction operands. We have
10725 to replace such identifiers with DEX expressions. */
10726
10727 /* Structure to collect identifiers which have been replaced by DEX
10728 expressions. */
10729 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10730
10731 struct unicosmk_dex {
10732 struct unicosmk_dex *next;
10733 const char *name;
10734 };
10735
10736 /* List of identifiers which have been replaced by DEX expressions. The DEX
10737 number is determined by the position in the list. */
10738
10739 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10740
10741 /* The number of elements in the DEX list. */
10742
10743 static int unicosmk_dex_count = 0;
10744
10745 /* Check if NAME must be replaced by a DEX expression. */
10746
10747 static int
10748 unicosmk_special_name (const char *name)
10749 {
10750 if (name[0] == '*')
10751 ++name;
10752
10753 if (name[0] == '$')
10754 ++name;
10755
10756 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10757 return 0;
10758
10759 switch (name[1])
10760 {
10761 case '1': case '2':
10762 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10763
10764 case '3':
10765 return (name[2] == '\0'
10766 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10767
10768 default:
10769 return (ISDIGIT (name[1]) && name[2] == '\0');
10770 }
10771 }
10772
10773 /* Return the DEX number if X must be replaced by a DEX expression and 0
10774 otherwise. */
10775
10776 static int
10777 unicosmk_need_dex (rtx x)
10778 {
10779 struct unicosmk_dex *dex;
10780 const char *name;
10781 int i;
10782
10783 if (GET_CODE (x) != SYMBOL_REF)
10784 return 0;
10785
10786 name = XSTR (x,0);
10787 if (! unicosmk_special_name (name))
10788 return 0;
10789
10790 i = unicosmk_dex_count;
10791 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10792 {
10793 if (! strcmp (name, dex->name))
10794 return i;
10795 --i;
10796 }
10797
10798 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10799 dex->name = name;
10800 dex->next = unicosmk_dex_list;
10801 unicosmk_dex_list = dex;
10802
10803 ++unicosmk_dex_count;
10804 return unicosmk_dex_count;
10805 }
10806
10807 /* Output the DEX definitions for this file. */
10808
10809 static void
10810 unicosmk_output_dex (FILE *file)
10811 {
10812 struct unicosmk_dex *dex;
10813 int i;
10814
10815 if (unicosmk_dex_list == NULL)
10816 return;
10817
10818 fprintf (file, "\t.dexstart\n");
10819
10820 i = unicosmk_dex_count;
10821 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10822 {
10823 fprintf (file, "\tDEX (%d) = ", i);
10824 assemble_name (file, dex->name);
10825 putc ('\n', file);
10826 --i;
10827 }
10828
10829 fprintf (file, "\t.dexend\n");
10830 }
10831
10832 /* Output text to appear at the beginning of an assembler file.  */
10833
10834 static void
10835 unicosmk_file_start (void)
10836 {
10837 int i;
10838
10839 fputs ("\t.ident\t", asm_out_file);
10840 unicosmk_output_module_name (asm_out_file);
10841 fputs ("\n\n", asm_out_file);
10842
10843 /* The Unicos/Mk assembler uses different register names. Instead of trying
10844 to support them, we simply use micro definitions. */
10845
10846 /* CAM has different register names: rN for the integer register N and fN
10847 for the floating-point register N. Instead of trying to use these in
10848 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10849 register. */
10850
10851 for (i = 0; i < 32; ++i)
10852 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10853
10854 for (i = 0; i < 32; ++i)
10855 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10856
10857 putc ('\n', asm_out_file);
10858
10859    /* The .align directive fills unused space with zeroes, which does not work
10860 in code sections. We define the macro 'gcc@code@align' which uses nops
10861 instead. Note that it assumes that code sections always have the
10862 biggest possible alignment since . refers to the current offset from
10863 the beginning of the section. */
10864
10865 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10866 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10867 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10868 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10869 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10870 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10871 fputs ("\t.endr\n", asm_out_file);
10872 fputs ("\t.endif\n", asm_out_file);
10873 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
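  /* unicosmk_output_align invokes this as e.g. "gcc@code@align 4", which
     pads with "bis r31,r31,r31" nops up to the next 16-byte boundary.  */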
10874
10875 /* Output extern declarations which should always be visible. */
10876 unicosmk_output_default_externs (asm_out_file);
10877
10878 /* Open a dummy section. We always need to be inside a section for the
10879 section-switching code to work correctly.
10880 ??? This should be a module id or something like that. I still have to
10881 figure out what the rules for those are. */
10882 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10883 }
10884
10885 /* Output text to appear at the end of an assembler file. This includes all
10886 pending extern declarations and DEX expressions. */
10887
10888 static void
10889 unicosmk_file_end (void)
10890 {
10891 fputs ("\t.endp\n\n", asm_out_file);
10892
10893 /* Output all pending externs. */
10894
10895 unicosmk_output_externs (asm_out_file);
10896
10897 /* Output dex definitions used for functions whose names conflict with
10898 register names. */
10899
10900 unicosmk_output_dex (asm_out_file);
10901
10902 fputs ("\t.end\t", asm_out_file);
10903 unicosmk_output_module_name (asm_out_file);
10904 putc ('\n', asm_out_file);
10905 }
10906
10907 #else
10908
10909 static void
10910 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10911 {}
10912
10913 static void
10914 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10915 {}
10916
10917 static void
10918 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10919 const char * fnname ATTRIBUTE_UNUSED)
10920 {}
10921
10922 rtx
10923 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10924 {
10925 return NULL_RTX;
10926 }
10927
10928 static int
10929 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10930 {
10931 return 0;
10932 }
10933
10934 #endif /* TARGET_ABI_UNICOSMK */
10935
10936 static void
10937 alpha_init_libfuncs (void)
10938 {
10939 if (TARGET_ABI_UNICOSMK)
10940 {
10941 /* Prevent gcc from generating calls to __divsi3. */
10942 set_optab_libfunc (sdiv_optab, SImode, 0);
10943 set_optab_libfunc (udiv_optab, SImode, 0);
10944
10945 /* Use the functions provided by the system library
10946 for DImode integer division. */
10947 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10948 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10949 }
10950 else if (TARGET_ABI_OPEN_VMS)
10951 {
10952 /* Use the VMS runtime library functions for division and
10953 remainder. */
10954 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10955 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10956 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10957 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10958 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10959 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10960 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10961 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10962 abort_libfunc = init_one_libfunc ("decc$abort");
10963 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10964 #ifdef MEM_LIBFUNCS_INIT
10965 MEM_LIBFUNCS_INIT;
10966 #endif
10967 }
10968 }
10969
10970 \f
10971 /* Initialize the GCC target structure. */
10972 #if TARGET_ABI_OPEN_VMS
10973 # undef TARGET_ATTRIBUTE_TABLE
10974 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10975 # undef TARGET_CAN_ELIMINATE
10976 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
10977 #endif
10978
10979 #undef TARGET_IN_SMALL_DATA_P
10980 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10981
10982 #if TARGET_ABI_UNICOSMK
10983 # undef TARGET_INSERT_ATTRIBUTES
10984 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10985 # undef TARGET_SECTION_TYPE_FLAGS
10986 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10987 # undef TARGET_ASM_UNIQUE_SECTION
10988 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10989 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10990 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10991 # undef TARGET_ASM_GLOBALIZE_LABEL
10992 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10993 # undef TARGET_MUST_PASS_IN_STACK
10994 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10995 #endif
10996
10997 #undef TARGET_ASM_ALIGNED_HI_OP
10998 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10999 #undef TARGET_ASM_ALIGNED_DI_OP
11000 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
11001
11002 /* Default unaligned ops are provided for ELF systems. To get unaligned
11003 data for non-ELF systems, we have to turn off auto alignment. */
11004 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
11005 #undef TARGET_ASM_UNALIGNED_HI_OP
11006 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
11007 #undef TARGET_ASM_UNALIGNED_SI_OP
11008 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
11009 #undef TARGET_ASM_UNALIGNED_DI_OP
11010 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
11011 #endif
11012
11013 #ifdef OBJECT_FORMAT_ELF
11014 #undef TARGET_ASM_RELOC_RW_MASK
11015 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
11016 #undef TARGET_ASM_SELECT_RTX_SECTION
11017 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
11018 #undef TARGET_SECTION_TYPE_FLAGS
11019 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
11020 #endif
11021
11022 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
11023 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
11024
11025 #undef TARGET_INIT_LIBFUNCS
11026 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
11027
11028 #undef TARGET_LEGITIMIZE_ADDRESS
11029 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
11030
11031 #if TARGET_ABI_UNICOSMK
11032 #undef TARGET_ASM_FILE_START
11033 #define TARGET_ASM_FILE_START unicosmk_file_start
11034 #undef TARGET_ASM_FILE_END
11035 #define TARGET_ASM_FILE_END unicosmk_file_end
11036 #else
11037 #undef TARGET_ASM_FILE_START
11038 #define TARGET_ASM_FILE_START alpha_file_start
11039 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
11040 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
11041 #endif
11042
11043 #undef TARGET_SCHED_ADJUST_COST
11044 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
11045 #undef TARGET_SCHED_ISSUE_RATE
11046 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
11047 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11048 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
11049 alpha_multipass_dfa_lookahead
11050
11051 #undef TARGET_HAVE_TLS
11052 #define TARGET_HAVE_TLS HAVE_AS_TLS
11053
11054 #undef TARGET_BUILTIN_DECL
11055 #define TARGET_BUILTIN_DECL alpha_builtin_decl
11056 #undef TARGET_INIT_BUILTINS
11057 #define TARGET_INIT_BUILTINS alpha_init_builtins
11058 #undef TARGET_EXPAND_BUILTIN
11059 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
11060 #undef TARGET_FOLD_BUILTIN
11061 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
11062
11063 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11064 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
11065 #undef TARGET_CANNOT_COPY_INSN_P
11066 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
11067 #undef TARGET_CANNOT_FORCE_CONST_MEM
11068 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
11069
11070 #if TARGET_ABI_OSF
11071 #undef TARGET_ASM_OUTPUT_MI_THUNK
11072 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
11073 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11074 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11075 #undef TARGET_STDARG_OPTIMIZE_HOOK
11076 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
11077 #endif
11078
11079 #undef TARGET_RTX_COSTS
11080 #define TARGET_RTX_COSTS alpha_rtx_costs
11081 #undef TARGET_ADDRESS_COST
11082 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
11083
11084 #undef TARGET_MACHINE_DEPENDENT_REORG
11085 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
11086
11087 #undef TARGET_PROMOTE_FUNCTION_MODE
11088 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
11089 #undef TARGET_PROMOTE_PROTOTYPES
11090 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
11091 #undef TARGET_RETURN_IN_MEMORY
11092 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
11093 #undef TARGET_PASS_BY_REFERENCE
11094 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
11095 #undef TARGET_SETUP_INCOMING_VARARGS
11096 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
11097 #undef TARGET_STRICT_ARGUMENT_NAMING
11098 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
11099 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
11100 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
11101 #undef TARGET_SPLIT_COMPLEX_ARG
11102 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
11103 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11104 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
11105 #undef TARGET_ARG_PARTIAL_BYTES
11106 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
11107 #undef TARGET_FUNCTION_ARG
11108 #define TARGET_FUNCTION_ARG alpha_function_arg
11109 #undef TARGET_FUNCTION_ARG_ADVANCE
11110 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
11111 #undef TARGET_TRAMPOLINE_INIT
11112 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
11113
11114 #undef TARGET_SECONDARY_RELOAD
11115 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
11116
11117 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11118 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
11119 #undef TARGET_VECTOR_MODE_SUPPORTED_P
11120 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
11121
11122 #undef TARGET_BUILD_BUILTIN_VA_LIST
11123 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
11124
11125 #undef TARGET_EXPAND_BUILTIN_VA_START
11126 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
11127
11128 /* The Alpha architecture does not require sequential consistency. See
11129 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
11130 for an example of how it can be violated in practice. */
11131 #undef TARGET_RELAXED_ORDERING
11132 #define TARGET_RELAXED_ORDERING true
11133
11134 #undef TARGET_DEFAULT_TARGET_FLAGS
11135 #define TARGET_DEFAULT_TARGET_FLAGS \
11136 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
11137 #undef TARGET_HANDLE_OPTION
11138 #define TARGET_HANDLE_OPTION alpha_handle_option
11139
11140 #undef TARGET_OPTION_OVERRIDE
11141 #define TARGET_OPTION_OVERRIDE alpha_option_override
11142
11143 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11144 #undef TARGET_MANGLE_TYPE
11145 #define TARGET_MANGLE_TYPE alpha_mangle_type
11146 #endif
11147
11148 #undef TARGET_LEGITIMATE_ADDRESS_P
11149 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
11150
11151 struct gcc_target targetm = TARGET_INITIALIZER;
11152
11153 \f
11154 #include "gt-alpha.h"