1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
63
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
66
67 static const char * const alpha_cpu_name[] =
68 {
69 "ev4", "ev5", "ev6"
70 };
71
72 /* Specify how accurate floating-point traps need to be. */
73
74 enum alpha_trap_precision alpha_tp;
75
76 /* Specify the floating-point rounding mode. */
77
78 enum alpha_fp_rounding_mode alpha_fprm;
79
80 /* Specify which things cause traps. */
81
82 enum alpha_fp_trap_mode alpha_fptm;
83
84 /* Nonzero if inside of a function, because the Alpha asm can't
85 handle .files inside of functions. */
86
87 static int inside_function = FALSE;
88
89 /* The number of cycles of latency we should assume on memory reads. */
90
91 int alpha_memory_latency = 3;
92
93 /* Whether the function needs the GP. */
94
95 static int alpha_function_needs_gp;
96
97 /* The alias set for prologue/epilogue register save/restore. */
98
99 static GTY(()) alias_set_type alpha_sr_alias_set;
100
101 /* The assembler name of the current function. */
102
103 static const char *alpha_fnname;
104
105 /* The next explicit relocation sequence number. */
106 extern GTY(()) int alpha_next_sequence_number;
107 int alpha_next_sequence_number = 1;
108
109 /* The literal and gpdisp sequence numbers for this insn, as printed
110 by %# and %* respectively. */
111 extern GTY(()) int alpha_this_literal_sequence_number;
112 extern GTY(()) int alpha_this_gpdisp_sequence_number;
113 int alpha_this_literal_sequence_number;
114 int alpha_this_gpdisp_sequence_number;
115
116 /* Costs of various operations on the different architectures. */
117
118 struct alpha_rtx_cost_data
119 {
120 unsigned char fp_add;
121 unsigned char fp_mult;
122 unsigned char fp_div_sf;
123 unsigned char fp_div_df;
124 unsigned char int_mult_si;
125 unsigned char int_mult_di;
126 unsigned char int_shift;
127 unsigned char int_cmov;
128 unsigned short int_div;
129 };
130
131 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
132 {
133 { /* EV4 */
134 COSTS_N_INSNS (6), /* fp_add */
135 COSTS_N_INSNS (6), /* fp_mult */
136 COSTS_N_INSNS (34), /* fp_div_sf */
137 COSTS_N_INSNS (63), /* fp_div_df */
138 COSTS_N_INSNS (23), /* int_mult_si */
139 COSTS_N_INSNS (23), /* int_mult_di */
140 COSTS_N_INSNS (2), /* int_shift */
141 COSTS_N_INSNS (2), /* int_cmov */
142 COSTS_N_INSNS (97), /* int_div */
143 },
144 { /* EV5 */
145 COSTS_N_INSNS (4), /* fp_add */
146 COSTS_N_INSNS (4), /* fp_mult */
147 COSTS_N_INSNS (15), /* fp_div_sf */
148 COSTS_N_INSNS (22), /* fp_div_df */
149 COSTS_N_INSNS (8), /* int_mult_si */
150 COSTS_N_INSNS (12), /* int_mult_di */
151 COSTS_N_INSNS (1) + 1, /* int_shift */
152 COSTS_N_INSNS (1), /* int_cmov */
153 COSTS_N_INSNS (83), /* int_div */
154 },
155 { /* EV6 */
156 COSTS_N_INSNS (4), /* fp_add */
157 COSTS_N_INSNS (4), /* fp_mult */
158 COSTS_N_INSNS (12), /* fp_div_sf */
159 COSTS_N_INSNS (15), /* fp_div_df */
160 COSTS_N_INSNS (7), /* int_mult_si */
161 COSTS_N_INSNS (7), /* int_mult_di */
162 COSTS_N_INSNS (1), /* int_shift */
163 COSTS_N_INSNS (2), /* int_cmov */
164 COSTS_N_INSNS (86), /* int_div */
165 },
166 };
167
168 /* Similar but tuned for code size instead of execution latency. The
169 extra +N is fractional cost tuning based on latency. It's used to
170 encourage use of cheaper insns like shift, but only if there's just
171 one of them. */
172
173 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
174 {
175 COSTS_N_INSNS (1), /* fp_add */
176 COSTS_N_INSNS (1), /* fp_mult */
177 COSTS_N_INSNS (1), /* fp_div_sf */
178 COSTS_N_INSNS (1) + 1, /* fp_div_df */
179 COSTS_N_INSNS (1) + 1, /* int_mult_si */
180 COSTS_N_INSNS (1) + 2, /* int_mult_di */
181 COSTS_N_INSNS (1), /* int_shift */
182 COSTS_N_INSNS (1), /* int_cmov */
183 COSTS_N_INSNS (6), /* int_div */
184 };
185
186 /* Get the number of args of a function in one of two ways. */
187 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
188 #define NUM_ARGS crtl->args.info.num_args
189 #else
190 #define NUM_ARGS crtl->args.info
191 #endif
192
193 #define REG_PV 27
194 #define REG_RA 26
195
196 /* Declarations of static functions. */
197 static struct machine_function *alpha_init_machine_status (void);
198 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
199
200 #if TARGET_ABI_OPEN_VMS
201 static void alpha_write_linkage (FILE *, const char *, tree);
202 #endif
203
204 static void unicosmk_output_deferred_case_vectors (FILE *);
205 static void unicosmk_gen_dsib (unsigned long *);
206 static void unicosmk_output_ssib (FILE *, const char *);
207 static int unicosmk_need_dex (rtx);
208 \f
209 /* Implement TARGET_HANDLE_OPTION. */
210
211 static bool
212 alpha_handle_option (size_t code, const char *arg, int value)
213 {
214 switch (code)
215 {
216 case OPT_mfp_regs:
217 if (value == 0)
218 target_flags |= MASK_SOFT_FP;
219 break;
220
221 case OPT_mieee:
222 case OPT_mieee_with_inexact:
223 target_flags |= MASK_IEEE_CONFORMANT;
224 break;
225
226 case OPT_mtls_size_:
227 if (value != 16 && value != 32 && value != 64)
228 error ("bad value %qs for -mtls-size switch", arg);
229 break;
230 }
231
232 return true;
233 }
234
235 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
236 /* Implement TARGET_MANGLE_TYPE. */
237
238 static const char *
239 alpha_mangle_type (const_tree type)
240 {
241 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
242 && TARGET_LONG_DOUBLE_128)
243 return "g";
244
245 /* For all other types, use normal C++ mangling. */
246 return NULL;
247 }
248 #endif
249
250 /* Parse target option strings. */
251
252 void
253 override_options (void)
254 {
255 static const struct cpu_table {
256 const char *const name;
257 const enum processor_type processor;
258 const int flags;
259 } cpu_table[] = {
260 { "ev4", PROCESSOR_EV4, 0 },
261 { "ev45", PROCESSOR_EV4, 0 },
262 { "21064", PROCESSOR_EV4, 0 },
263 { "ev5", PROCESSOR_EV5, 0 },
264 { "21164", PROCESSOR_EV5, 0 },
265 { "ev56", PROCESSOR_EV5, MASK_BWX },
266 { "21164a", PROCESSOR_EV5, MASK_BWX },
267 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
268 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
269 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
270 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
271 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
272 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
273 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
274 };
275
276 int const ct_size = ARRAY_SIZE (cpu_table);
277 int i;
278
279 /* Unicos/Mk doesn't have shared libraries. */
280 if (TARGET_ABI_UNICOSMK && flag_pic)
281 {
282 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
283 (flag_pic > 1) ? "PIC" : "pic");
284 flag_pic = 0;
285 }
286
287 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
288 floating-point instructions. Make that the default for this target. */
289 if (TARGET_ABI_UNICOSMK)
290 alpha_fprm = ALPHA_FPRM_DYN;
291 else
292 alpha_fprm = ALPHA_FPRM_NORM;
293
294 alpha_tp = ALPHA_TP_PROG;
295 alpha_fptm = ALPHA_FPTM_N;
296
297 /* We cannot use su and sui qualifiers for conversion instructions on
298 Unicos/Mk. I'm not sure if this is due to assembler or hardware
299 limitations. Right now, we issue a warning if -mieee is specified
300 and then ignore it; eventually, we should either get it right or
301 disable the option altogether. */
302
303 if (TARGET_IEEE)
304 {
305 if (TARGET_ABI_UNICOSMK)
306 warning (0, "-mieee not supported on Unicos/Mk");
307 else
308 {
309 alpha_tp = ALPHA_TP_INSN;
310 alpha_fptm = ALPHA_FPTM_SU;
311 }
312 }
313
314 if (TARGET_IEEE_WITH_INEXACT)
315 {
316 if (TARGET_ABI_UNICOSMK)
317 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
318 else
319 {
320 alpha_tp = ALPHA_TP_INSN;
321 alpha_fptm = ALPHA_FPTM_SUI;
322 }
323 }
324
325 if (alpha_tp_string)
326 {
327 if (! strcmp (alpha_tp_string, "p"))
328 alpha_tp = ALPHA_TP_PROG;
329 else if (! strcmp (alpha_tp_string, "f"))
330 alpha_tp = ALPHA_TP_FUNC;
331 else if (! strcmp (alpha_tp_string, "i"))
332 alpha_tp = ALPHA_TP_INSN;
333 else
334 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
335 }
336
337 if (alpha_fprm_string)
338 {
339 if (! strcmp (alpha_fprm_string, "n"))
340 alpha_fprm = ALPHA_FPRM_NORM;
341 else if (! strcmp (alpha_fprm_string, "m"))
342 alpha_fprm = ALPHA_FPRM_MINF;
343 else if (! strcmp (alpha_fprm_string, "c"))
344 alpha_fprm = ALPHA_FPRM_CHOP;
345 else if (! strcmp (alpha_fprm_string,"d"))
346 alpha_fprm = ALPHA_FPRM_DYN;
347 else
348 error ("bad value %qs for -mfp-rounding-mode switch",
349 alpha_fprm_string);
350 }
351
352 if (alpha_fptm_string)
353 {
354 if (strcmp (alpha_fptm_string, "n") == 0)
355 alpha_fptm = ALPHA_FPTM_N;
356 else if (strcmp (alpha_fptm_string, "u") == 0)
357 alpha_fptm = ALPHA_FPTM_U;
358 else if (strcmp (alpha_fptm_string, "su") == 0)
359 alpha_fptm = ALPHA_FPTM_SU;
360 else if (strcmp (alpha_fptm_string, "sui") == 0)
361 alpha_fptm = ALPHA_FPTM_SUI;
362 else
363 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
364 }
365
366 if (alpha_cpu_string)
367 {
368 for (i = 0; i < ct_size; i++)
369 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
370 {
371 alpha_tune = alpha_cpu = cpu_table [i].processor;
372 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
373 target_flags |= cpu_table [i].flags;
374 break;
375 }
376 if (i == ct_size)
377 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
378 }
379
380 if (alpha_tune_string)
381 {
382 for (i = 0; i < ct_size; i++)
383 if (! strcmp (alpha_tune_string, cpu_table [i].name))
384 {
385 alpha_tune = cpu_table [i].processor;
386 break;
387 }
388 if (i == ct_size)
389 error ("bad value %qs for -mcpu switch", alpha_tune_string);
390 }
391
392 /* Do some sanity checks on the above options. */
393
394 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
395 {
396 warning (0, "trap mode not supported on Unicos/Mk");
397 alpha_fptm = ALPHA_FPTM_N;
398 }
399
400 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
401 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
402 {
403 warning (0, "fp software completion requires -mtrap-precision=i");
404 alpha_tp = ALPHA_TP_INSN;
405 }
406
407 if (alpha_cpu == PROCESSOR_EV6)
408 {
409 /* Except for EV6 pass 1 (not released), we always have precise
410 arithmetic traps. Which means we can do software completion
411 without minding trap shadows. */
412 alpha_tp = ALPHA_TP_PROG;
413 }
414
415 if (TARGET_FLOAT_VAX)
416 {
417 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
418 {
419 warning (0, "rounding mode not supported for VAX floats");
420 alpha_fprm = ALPHA_FPRM_NORM;
421 }
422 if (alpha_fptm == ALPHA_FPTM_SUI)
423 {
424 warning (0, "trap mode not supported for VAX floats");
425 alpha_fptm = ALPHA_FPTM_SU;
426 }
427 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
428 warning (0, "128-bit long double not supported for VAX floats");
429 target_flags &= ~MASK_LONG_DOUBLE_128;
430 }
431
432 {
433 char *end;
434 int lat;
435
436 if (!alpha_mlat_string)
437 alpha_mlat_string = "L1";
438
439 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
440 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
441 ;
442 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
443 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
444 && alpha_mlat_string[2] == '\0')
445 {
446 static int const cache_latency[][4] =
447 {
448 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
449 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
450 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
451 };
452
453 lat = alpha_mlat_string[1] - '0';
454 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
455 {
456 warning (0, "L%d cache latency unknown for %s",
457 lat, alpha_cpu_name[alpha_tune]);
458 lat = 3;
459 }
460 else
461 lat = cache_latency[alpha_tune][lat-1];
462 }
463 else if (! strcmp (alpha_mlat_string, "main"))
464 {
465 /* Most current memories have about 370ns latency. This is
466 a reasonable guess for a fast cpu. */
467 lat = 150;
468 }
469 else
470 {
471 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
472 lat = 3;
473 }
474
475 alpha_memory_latency = lat;
476 }
477
478 /* Default the definition of "small data" to 8 bytes. */
479 if (!g_switch_set)
480 g_switch_value = 8;
481
482 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
483 if (flag_pic == 1)
484 target_flags |= MASK_SMALL_DATA;
485 else if (flag_pic == 2)
486 target_flags &= ~MASK_SMALL_DATA;
487
488 /* Align labels and loops for optimal branching. */
489 /* ??? Kludge these by not doing anything if we don't optimize and also if
490 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
491 if (optimize > 0 && write_symbols != SDB_DEBUG)
492 {
493 if (align_loops <= 0)
494 align_loops = 16;
495 if (align_jumps <= 0)
496 align_jumps = 16;
497 }
498 if (align_functions <= 0)
499 align_functions = 16;
500
501 /* Acquire a unique set number for our register saves and restores. */
502 alpha_sr_alias_set = new_alias_set ();
503
504 /* Register variables and functions with the garbage collector. */
505
506 /* Set up function hooks. */
507 init_machine_status = alpha_init_machine_status;
508
509 /* Tell the compiler when we're using VAX floating point. */
510 if (TARGET_FLOAT_VAX)
511 {
512 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
513 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
514 REAL_MODE_FORMAT (TFmode) = NULL;
515 }
516
517 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
518 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
519 target_flags |= MASK_LONG_DOUBLE_128;
520 #endif
521
522 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
523 can be optimized to ap = __builtin_next_arg (0). */
524 if (TARGET_ABI_UNICOSMK)
525 targetm.expand_builtin_va_start = NULL;
526 }
527 \f
528 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
529
530 int
531 zap_mask (HOST_WIDE_INT value)
532 {
533 int i;
534
535 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
536 i++, value >>= 8)
537 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
538 return 0;
539
540 return 1;
541 }
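
/* Worked example (editorial addition, not original GCC text):
   zap_mask (0x00000000ffff0000) returns 1, since every byte is either
   0x00 or 0xff, while zap_mask (0x00000000ffff00f0) returns 0 because
   of the 0xf0 byte.  */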
542
543 /* Return true if OP is valid for a particular TLS relocation.
544 We are already guaranteed that OP is a CONST. */
545
546 int
547 tls_symbolic_operand_1 (rtx op, int size, int unspec)
548 {
549 op = XEXP (op, 0);
550
551 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
552 return 0;
553 op = XVECEXP (op, 0, 0);
554
555 if (GET_CODE (op) != SYMBOL_REF)
556 return 0;
557
558 switch (SYMBOL_REF_TLS_MODEL (op))
559 {
560 case TLS_MODEL_LOCAL_DYNAMIC:
561 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
562 case TLS_MODEL_INITIAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == 64;
564 case TLS_MODEL_LOCAL_EXEC:
565 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
566 default:
567 gcc_unreachable ();
568 }
569 }
570
571 /* Used by aligned_memory_operand and unaligned_memory_operand to
572 resolve what reload is going to do with OP if it's a register. */
573
574 rtx
575 resolve_reload_operand (rtx op)
576 {
577 if (reload_in_progress)
578 {
579 rtx tmp = op;
580 if (GET_CODE (tmp) == SUBREG)
581 tmp = SUBREG_REG (tmp);
582 if (REG_P (tmp)
583 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
584 {
585 op = reg_equiv_memory_loc[REGNO (tmp)];
586 if (op == 0)
587 return 0;
588 }
589 }
590 return op;
591 }
592
593 /* The set of scalar modes supported differs from the default check-what-c-supports
594 version in that sometimes TFmode is available even when long double
595 indicates only DFmode. On unicosmk, we have the situation that HImode
596 doesn't map to any C type, but of course we still support that. */
597
598 static bool
599 alpha_scalar_mode_supported_p (enum machine_mode mode)
600 {
601 switch (mode)
602 {
603 case QImode:
604 case HImode:
605 case SImode:
606 case DImode:
607 case TImode: /* via optabs.c */
608 return true;
609
610 case SFmode:
611 case DFmode:
612 return true;
613
614 case TFmode:
615 return TARGET_HAS_XFLOATING_LIBS;
616
617 default:
618 return false;
619 }
620 }
621
622 /* Alpha implements a couple of integer vector mode operations when
623 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
624    which allows the vectorizer to operate on e.g. move instructions,
625    and lets expand_vector_operations do something useful with them.  */
626
627 static bool
628 alpha_vector_mode_supported_p (enum machine_mode mode)
629 {
630 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
631 }
632
633 /* Return 1 if this function can directly return via $26. */
634
635 int
636 direct_return (void)
637 {
638 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
639 && reload_completed
640 && alpha_sa_size () == 0
641 && get_frame_size () == 0
642 && crtl->outgoing_args_size == 0
643 && crtl->args.pretend_args_size == 0);
644 }
645
646 /* Return the ADDR_VEC associated with a tablejump insn. */
647
648 rtx
649 alpha_tablejump_addr_vec (rtx insn)
650 {
651 rtx tmp;
652
653 tmp = JUMP_LABEL (insn);
654 if (!tmp)
655 return NULL_RTX;
656 tmp = NEXT_INSN (tmp);
657 if (!tmp)
658 return NULL_RTX;
659 if (JUMP_P (tmp)
660 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
661 return PATTERN (tmp);
662 return NULL_RTX;
663 }
664
665 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
666
667 rtx
668 alpha_tablejump_best_label (rtx insn)
669 {
670 rtx jump_table = alpha_tablejump_addr_vec (insn);
671 rtx best_label = NULL_RTX;
672
673 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
674 there for edge frequency counts from profile data. */
675
676 if (jump_table)
677 {
678 int n_labels = XVECLEN (jump_table, 1);
679 int best_count = -1;
680 int i, j;
681
682 for (i = 0; i < n_labels; i++)
683 {
684 int count = 1;
685
686 for (j = i + 1; j < n_labels; j++)
687 if (XEXP (XVECEXP (jump_table, 1, i), 0)
688 == XEXP (XVECEXP (jump_table, 1, j), 0))
689 count++;
690
691 if (count > best_count)
692 best_count = count, best_label = XVECEXP (jump_table, 1, i);
693 }
694 }
695
696 return best_label ? best_label : const0_rtx;
697 }
698
699 /* Return the TLS model to use for SYMBOL. */
700
701 static enum tls_model
702 tls_symbolic_operand_type (rtx symbol)
703 {
704 enum tls_model model;
705
706 if (GET_CODE (symbol) != SYMBOL_REF)
707 return TLS_MODEL_NONE;
708 model = SYMBOL_REF_TLS_MODEL (symbol);
709
710 /* Local-exec with a 64-bit size is the same code as initial-exec. */
711 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
712 model = TLS_MODEL_INITIAL_EXEC;
713
714 return model;
715 }
716 \f
717 /* Return true if the function DECL will share the same GP as any
718 function in the current unit of translation. */
719
720 static bool
721 decl_has_samegp (const_tree decl)
722 {
723 /* Functions that are not local can be overridden, and thus may
724 not share the same gp. */
725 if (!(*targetm.binds_local_p) (decl))
726 return false;
727
728 /* If -msmall-data is in effect, assume that there is only one GP
729 for the module, and so any local symbol has this property. We
730 need explicit relocations to be able to enforce this for symbols
731 not defined in this unit of translation, however. */
732 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
733 return true;
734
735 /* Functions that are not external are defined in this UoT. */
736 /* ??? Irritatingly, static functions not yet emitted are still
737 marked "external". Apply this to non-static functions only. */
738 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
739 }
740
741 /* Return true if EXP should be placed in the small data section. */
742
743 static bool
744 alpha_in_small_data_p (const_tree exp)
745 {
746 /* We want to merge strings, so we never consider them small data. */
747 if (TREE_CODE (exp) == STRING_CST)
748 return false;
749
750 /* Functions are never in the small data area. Duh. */
751 if (TREE_CODE (exp) == FUNCTION_DECL)
752 return false;
753
754 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
755 {
756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
757 if (strcmp (section, ".sdata") == 0
758 || strcmp (section, ".sbss") == 0)
759 return true;
760 }
761 else
762 {
763 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
764
765 /* If this is an incomplete type with size 0, then we can't put it
766 in sdata because it might be too big when completed. */
767 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
768 return true;
769 }
770
771 return false;
772 }
773
774 #if TARGET_ABI_OPEN_VMS
775 static bool
776 alpha_linkage_symbol_p (const char *symname)
777 {
778 int symlen = strlen (symname);
779
780 if (symlen > 4)
781 return strcmp (&symname [symlen - 4], "..lk") == 0;
782
783 return false;
784 }
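
/* For example (editorial note, hypothetical symbol name): a VMS linkage
   symbol such as "foo..lk" passes the test above, being longer than four
   characters and ending in "..lk".  */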
785
786 #define LINKAGE_SYMBOL_REF_P(X) \
787 ((GET_CODE (X) == SYMBOL_REF \
788 && alpha_linkage_symbol_p (XSTR (X, 0))) \
789 || (GET_CODE (X) == CONST \
790 && GET_CODE (XEXP (X, 0)) == PLUS \
791 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
793 #endif
794
795 /* legitimate_address_p recognizes an RTL expression that is a valid
796 memory address for an instruction. The MODE argument is the
797 machine mode for the MEM expression that wants to use this address.
798
799 For Alpha, we have either a constant address or the sum of a
800 register and a constant address, or just a register. For DImode,
801    any of those forms can be surrounded with an AND that clears the
802 low-order three bits; this is an "unaligned" access. */
803
804 static bool
805 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
806 {
807 /* If this is an ldq_u type address, discard the outer AND. */
808 if (mode == DImode
809 && GET_CODE (x) == AND
810 && CONST_INT_P (XEXP (x, 1))
811 && INTVAL (XEXP (x, 1)) == -8)
812 x = XEXP (x, 0);
813
814 /* Discard non-paradoxical subregs. */
815 if (GET_CODE (x) == SUBREG
816 && (GET_MODE_SIZE (GET_MODE (x))
817 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
818 x = SUBREG_REG (x);
819
820 /* Unadorned general registers are valid. */
821 if (REG_P (x)
822 && (strict
823 ? STRICT_REG_OK_FOR_BASE_P (x)
824 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
825 return true;
826
827 /* Constant addresses (i.e. +/- 32k) are valid. */
828 if (CONSTANT_ADDRESS_P (x))
829 return true;
830
831 #if TARGET_ABI_OPEN_VMS
832 if (LINKAGE_SYMBOL_REF_P (x))
833 return true;
834 #endif
835
836 /* Register plus a small constant offset is valid. */
837 if (GET_CODE (x) == PLUS)
838 {
839 rtx ofs = XEXP (x, 1);
840 x = XEXP (x, 0);
841
842 /* Discard non-paradoxical subregs. */
843 if (GET_CODE (x) == SUBREG
844 && (GET_MODE_SIZE (GET_MODE (x))
845 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
846 x = SUBREG_REG (x);
847
848 if (REG_P (x))
849 {
850 if (! strict
851 && NONSTRICT_REG_OK_FP_BASE_P (x)
852 && CONST_INT_P (ofs))
853 return true;
854 if ((strict
855 ? STRICT_REG_OK_FOR_BASE_P (x)
856 : NONSTRICT_REG_OK_FOR_BASE_P (x))
857 && CONSTANT_ADDRESS_P (ofs))
858 return true;
859 }
860 }
861
862 /* If we're managing explicit relocations, LO_SUM is valid, as are small
863 data symbols. Avoid explicit relocations of modes larger than word
864    mode since e.g. $LC0+8($1) can fold around +/- 32k offset.  */
865 else if (TARGET_EXPLICIT_RELOCS
866 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
867 {
868 if (small_symbolic_operand (x, Pmode))
869 return true;
870
871 if (GET_CODE (x) == LO_SUM)
872 {
873 rtx ofs = XEXP (x, 1);
874 x = XEXP (x, 0);
875
876 /* Discard non-paradoxical subregs. */
877 if (GET_CODE (x) == SUBREG
878 && (GET_MODE_SIZE (GET_MODE (x))
879 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
880 x = SUBREG_REG (x);
881
882 /* Must have a valid base register. */
883 if (! (REG_P (x)
884 && (strict
885 ? STRICT_REG_OK_FOR_BASE_P (x)
886 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
887 return false;
888
889 /* The symbol must be local. */
890 if (local_symbolic_operand (ofs, Pmode)
891 || dtp32_symbolic_operand (ofs, Pmode)
892 || tp32_symbolic_operand (ofs, Pmode))
893 return true;
894 }
895 }
896
897 return false;
898 }
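
/* Illustrative sketch (editorial addition, not original GCC text) of
   address forms accepted above, assuming $1 is an acceptable base
   register and X is a local symbol:
     (reg $1)                                            plain base
     (plus (reg $1) (const_int 64))                      base + small offset
     (and (plus (reg $1) (const_int 8)) (const_int -8))  DImode ldq_u form
     (lo_sum (reg $1) (symbol_ref "X"))                  with explicit relocs  */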
899
900 /* Build the SYMBOL_REF for __tls_get_addr. */
901
902 static GTY(()) rtx tls_get_addr_libfunc;
903
904 static rtx
905 get_tls_get_addr (void)
906 {
907 if (!tls_get_addr_libfunc)
908 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
909 return tls_get_addr_libfunc;
910 }
911
912 /* Try machine-dependent ways of modifying an illegitimate address
913 to be legitimate. If we find one, return the new, valid address. */
914
915 static rtx
916 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
917 {
918 HOST_WIDE_INT addend;
919
920 /* If the address is (plus reg const_int) and the CONST_INT is not a
921 valid offset, compute the high part of the constant and add it to
922 the register. Then our address is (plus temp low-part-const). */
923 if (GET_CODE (x) == PLUS
924 && REG_P (XEXP (x, 0))
925 && CONST_INT_P (XEXP (x, 1))
926 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
927 {
928 addend = INTVAL (XEXP (x, 1));
929 x = XEXP (x, 0);
930 goto split_addend;
931 }
932
933 /* If the address is (const (plus FOO const_int)), find the low-order
934 part of the CONST_INT. Then load FOO plus any high-order part of the
935 CONST_INT into a register. Our address is (plus reg low-part-const).
936 This is done to reduce the number of GOT entries. */
937 if (can_create_pseudo_p ()
938 && GET_CODE (x) == CONST
939 && GET_CODE (XEXP (x, 0)) == PLUS
940 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
941 {
942 addend = INTVAL (XEXP (XEXP (x, 0), 1));
943 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
944 goto split_addend;
945 }
946
947 /* If we have a (plus reg const), emit the load as in (2), then add
948 the two registers, and finally generate (plus reg low-part-const) as
949 our address. */
950 if (can_create_pseudo_p ()
951 && GET_CODE (x) == PLUS
952 && REG_P (XEXP (x, 0))
953 && GET_CODE (XEXP (x, 1)) == CONST
954 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
955 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
956 {
957 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
958 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
959 XEXP (XEXP (XEXP (x, 1), 0), 0),
960 NULL_RTX, 1, OPTAB_LIB_WIDEN);
961 goto split_addend;
962 }
963
964 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
965    Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
966 around +/- 32k offset. */
967 if (TARGET_EXPLICIT_RELOCS
968 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
969 && symbolic_operand (x, Pmode))
970 {
971 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
972
973 switch (tls_symbolic_operand_type (x))
974 {
975 case TLS_MODEL_NONE:
976 break;
977
978 case TLS_MODEL_GLOBAL_DYNAMIC:
979 start_sequence ();
980
981 r0 = gen_rtx_REG (Pmode, 0);
982 r16 = gen_rtx_REG (Pmode, 16);
983 tga = get_tls_get_addr ();
984 dest = gen_reg_rtx (Pmode);
985 seq = GEN_INT (alpha_next_sequence_number++);
986
987 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
988 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
989 insn = emit_call_insn (insn);
990 RTL_CONST_CALL_P (insn) = 1;
991 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
992
993 insn = get_insns ();
994 end_sequence ();
995
996 emit_libcall_block (insn, dest, r0, x);
997 return dest;
998
999 case TLS_MODEL_LOCAL_DYNAMIC:
1000 start_sequence ();
1001
1002 r0 = gen_rtx_REG (Pmode, 0);
1003 r16 = gen_rtx_REG (Pmode, 16);
1004 tga = get_tls_get_addr ();
1005 scratch = gen_reg_rtx (Pmode);
1006 seq = GEN_INT (alpha_next_sequence_number++);
1007
1008 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1009 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1010 insn = emit_call_insn (insn);
1011 RTL_CONST_CALL_P (insn) = 1;
1012 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1013
1014 insn = get_insns ();
1015 end_sequence ();
1016
1017 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1018 UNSPEC_TLSLDM_CALL);
1019 emit_libcall_block (insn, scratch, r0, eqv);
1020
1021 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1022 eqv = gen_rtx_CONST (Pmode, eqv);
1023
1024 if (alpha_tls_size == 64)
1025 {
1026 dest = gen_reg_rtx (Pmode);
1027 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1028 emit_insn (gen_adddi3 (dest, dest, scratch));
1029 return dest;
1030 }
1031 if (alpha_tls_size == 32)
1032 {
1033 insn = gen_rtx_HIGH (Pmode, eqv);
1034 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1035 scratch = gen_reg_rtx (Pmode);
1036 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1037 }
1038 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1039
1040 case TLS_MODEL_INITIAL_EXEC:
1041 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1042 eqv = gen_rtx_CONST (Pmode, eqv);
1043 tp = gen_reg_rtx (Pmode);
1044 scratch = gen_reg_rtx (Pmode);
1045 dest = gen_reg_rtx (Pmode);
1046
1047 emit_insn (gen_load_tp (tp));
1048 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1049 emit_insn (gen_adddi3 (dest, tp, scratch));
1050 return dest;
1051
1052 case TLS_MODEL_LOCAL_EXEC:
1053 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1054 eqv = gen_rtx_CONST (Pmode, eqv);
1055 tp = gen_reg_rtx (Pmode);
1056
1057 emit_insn (gen_load_tp (tp));
1058 if (alpha_tls_size == 32)
1059 {
1060 insn = gen_rtx_HIGH (Pmode, eqv);
1061 insn = gen_rtx_PLUS (Pmode, tp, insn);
1062 tp = gen_reg_rtx (Pmode);
1063 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1064 }
1065 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1066
1067 default:
1068 gcc_unreachable ();
1069 }
1070
1071 if (local_symbolic_operand (x, Pmode))
1072 {
1073 if (small_symbolic_operand (x, Pmode))
1074 return x;
1075 else
1076 {
1077 if (can_create_pseudo_p ())
1078 scratch = gen_reg_rtx (Pmode);
1079 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1080 gen_rtx_HIGH (Pmode, x)));
1081 return gen_rtx_LO_SUM (Pmode, scratch, x);
1082 }
1083 }
1084 }
1085
1086 return NULL;
1087
1088 split_addend:
1089 {
1090 HOST_WIDE_INT low, high;
1091
1092 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1093 addend -= low;
1094 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1095 addend -= high;
1096
1097 if (addend)
1098 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1099 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1100 1, OPTAB_LIB_WIDEN);
1101 if (high)
1102 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1103 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1104 1, OPTAB_LIB_WIDEN);
1105
1106 return plus_constant (x, low);
1107 }
1108 }
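
/* Worked example (editorial addition, not original GCC text) of the
   split_addend arithmetic above, for addend == 0x9000:
     low  = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000
     high = (((0x9000 - low) & 0xffffffff) ^ 0x80000000) - 0x80000000 = 0x10000
   so the address is rebuilt as (x + 0x10000) + (-0x7000), where the high
   part is a multiple of 0x10000 suitable for ldah and the low part fits
   lda's signed 16-bit range.  */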
1109
1110
1111 /* Try machine-dependent ways of modifying an illegitimate address
1112 to be legitimate. Return X or the new, valid address. */
1113
1114 static rtx
1115 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1116 enum machine_mode mode)
1117 {
1118 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1119 return new_x ? new_x : x;
1120 }
1121
1122 /* Primarily this is required for TLS symbols, but given that our move
1123 patterns *ought* to be able to handle any symbol at any time, we
1124 should never be spilling symbolic operands to the constant pool, ever. */
1125
1126 static bool
1127 alpha_cannot_force_const_mem (rtx x)
1128 {
1129 enum rtx_code code = GET_CODE (x);
1130 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1131 }
1132
1133 /* We do not allow indirect calls to be optimized into sibling calls, nor
1134 can we allow a call to a function with a different GP to be optimized
1135 into a sibcall. */
1136
1137 static bool
1138 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1139 {
1140 /* Can't do indirect tail calls, since we don't know if the target
1141 uses the same GP. */
1142 if (!decl)
1143 return false;
1144
1145 /* Otherwise, we can make a tail call if the target function shares
1146 the same GP. */
1147 return decl_has_samegp (decl);
1148 }
1149
1150 int
1151 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1152 {
1153 rtx x = *px;
1154
1155 /* Don't re-split. */
1156 if (GET_CODE (x) == LO_SUM)
1157 return -1;
1158
1159 return small_symbolic_operand (x, Pmode) != 0;
1160 }
1161
1162 static int
1163 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1164 {
1165 rtx x = *px;
1166
1167 /* Don't re-split. */
1168 if (GET_CODE (x) == LO_SUM)
1169 return -1;
1170
1171 if (small_symbolic_operand (x, Pmode))
1172 {
1173 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1174 *px = x;
1175 return -1;
1176 }
1177
1178 return 0;
1179 }
1180
1181 rtx
1182 split_small_symbolic_operand (rtx x)
1183 {
1184 x = copy_insn (x);
1185 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1186 return x;
1187 }
1188
1189 /* Indicate that INSN cannot be duplicated. This is true for any insn
1190 that we've marked with gpdisp relocs, since those have to stay in
1191 1-1 correspondence with one another.
1192
1193 Technically we could copy them if we could set up a mapping from one
1194 sequence number to another, across the set of insns to be duplicated.
1195 This seems overly complicated and error-prone since interblock motion
1196 from sched-ebb could move one of the pair of insns to a different block.
1197
1198 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1199 then they'll be in a different block from their ldgp. Which could lead
1200 the bb reorder code to think that it would be ok to copy just the block
1201 containing the call and branch to the block containing the ldgp. */
1202
1203 static bool
1204 alpha_cannot_copy_insn_p (rtx insn)
1205 {
1206 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1207 return false;
1208 if (recog_memoized (insn) >= 0)
1209 return get_attr_cannot_copy (insn);
1210 else
1211 return false;
1212 }
1213
1214
1215 /* Try a machine-dependent way of reloading an illegitimate address
1216 operand. If we find one, push the reload and return the new rtx. */
1217
1218 rtx
1219 alpha_legitimize_reload_address (rtx x,
1220 enum machine_mode mode ATTRIBUTE_UNUSED,
1221 int opnum, int type,
1222 int ind_levels ATTRIBUTE_UNUSED)
1223 {
1224 /* We must recognize output that we have already generated ourselves. */
1225 if (GET_CODE (x) == PLUS
1226 && GET_CODE (XEXP (x, 0)) == PLUS
1227 && REG_P (XEXP (XEXP (x, 0), 0))
1228 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1229 && CONST_INT_P (XEXP (x, 1)))
1230 {
1231 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1232 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1233 opnum, (enum reload_type) type);
1234 return x;
1235 }
1236
1237 /* We wish to handle large displacements off a base register by
1238 splitting the addend across an ldah and the mem insn. This
1239    cuts the number of extra insns needed from 3 to 1.  */
1240 if (GET_CODE (x) == PLUS
1241 && REG_P (XEXP (x, 0))
1242 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1243 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1244 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1245 {
1246 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1247 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1248 HOST_WIDE_INT high
1249 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1250
1251 /* Check for 32-bit overflow. */
1252 if (high + low != val)
1253 return NULL_RTX;
1254
1255 /* Reload the high part into a base reg; leave the low part
1256 in the mem directly. */
1257 x = gen_rtx_PLUS (GET_MODE (x),
1258 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1259 GEN_INT (high)),
1260 GEN_INT (low));
1261
1262 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1263 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1264 opnum, (enum reload_type) type);
1265 return x;
1266 }
1267
1268 return NULL_RTX;
1269 }
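
/* Worked example (editorial addition, not original GCC text): for a
   displacement of 0x9000 off a base register, the code above splits it
   into high == 0x10000 and low == -0x7000; the high part is reloaded
   into a base register with a single ldah while the mem keeps the low
   part, instead of materializing the whole constant separately.  */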
1270 \f
1271 /* Compute a (partial) cost for rtx X. Return true if the complete
1272 cost has been computed, and false if subexpressions should be
1273 scanned. In either case, *TOTAL contains the cost result. */
1274
1275 static bool
1276 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1277 bool speed)
1278 {
1279 enum machine_mode mode = GET_MODE (x);
1280 bool float_mode_p = FLOAT_MODE_P (mode);
1281 const struct alpha_rtx_cost_data *cost_data;
1282
1283 if (!speed)
1284 cost_data = &alpha_rtx_cost_size;
1285 else
1286 cost_data = &alpha_rtx_cost_data[alpha_tune];
1287
1288 switch (code)
1289 {
1290 case CONST_INT:
1291 /* If this is an 8-bit constant, return zero since it can be used
1292 nearly anywhere with no cost. If it is a valid operand for an
1293 ADD or AND, likewise return 0 if we know it will be used in that
1294 context. Otherwise, return 2 since it might be used there later.
1295 All other constants take at least two insns. */
1296 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1297 {
1298 *total = 0;
1299 return true;
1300 }
1301 /* FALLTHRU */
1302
1303 case CONST_DOUBLE:
1304 if (x == CONST0_RTX (mode))
1305 *total = 0;
1306 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1307 || (outer_code == AND && and_operand (x, VOIDmode)))
1308 *total = 0;
1309 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1310 *total = 2;
1311 else
1312 *total = COSTS_N_INSNS (2);
1313 return true;
1314
1315 case CONST:
1316 case SYMBOL_REF:
1317 case LABEL_REF:
1318 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1319 *total = COSTS_N_INSNS (outer_code != MEM);
1320 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1321 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1322 else if (tls_symbolic_operand_type (x))
1323 /* Estimate of cost for call_pal rduniq. */
1324 /* ??? How many insns do we emit here? More than one... */
1325 *total = COSTS_N_INSNS (15);
1326 else
1327 /* Otherwise we do a load from the GOT. */
1328 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1329 return true;
1330
1331 case HIGH:
1332 /* This is effectively an add_operand. */
1333 *total = 2;
1334 return true;
1335
1336 case PLUS:
1337 case MINUS:
1338 if (float_mode_p)
1339 *total = cost_data->fp_add;
1340 else if (GET_CODE (XEXP (x, 0)) == MULT
1341 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1342 {
1343 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1344 (enum rtx_code) outer_code, speed)
1345 + rtx_cost (XEXP (x, 1),
1346 (enum rtx_code) outer_code, speed)
1347 + COSTS_N_INSNS (1));
1348 return true;
1349 }
1350 return false;
1351
1352 case MULT:
1353 if (float_mode_p)
1354 *total = cost_data->fp_mult;
1355 else if (mode == DImode)
1356 *total = cost_data->int_mult_di;
1357 else
1358 *total = cost_data->int_mult_si;
1359 return false;
1360
1361 case ASHIFT:
1362 if (CONST_INT_P (XEXP (x, 1))
1363 && INTVAL (XEXP (x, 1)) <= 3)
1364 {
1365 *total = COSTS_N_INSNS (1);
1366 return false;
1367 }
1368 /* FALLTHRU */
1369
1370 case ASHIFTRT:
1371 case LSHIFTRT:
1372 *total = cost_data->int_shift;
1373 return false;
1374
1375 case IF_THEN_ELSE:
1376 if (float_mode_p)
1377 *total = cost_data->fp_add;
1378 else
1379 *total = cost_data->int_cmov;
1380 return false;
1381
1382 case DIV:
1383 case UDIV:
1384 case MOD:
1385 case UMOD:
1386 if (!float_mode_p)
1387 *total = cost_data->int_div;
1388 else if (mode == SFmode)
1389 *total = cost_data->fp_div_sf;
1390 else
1391 *total = cost_data->fp_div_df;
1392 return false;
1393
1394 case MEM:
1395 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1396 return true;
1397
1398 case NEG:
1399 if (! float_mode_p)
1400 {
1401 *total = COSTS_N_INSNS (1);
1402 return false;
1403 }
1404 /* FALLTHRU */
1405
1406 case ABS:
1407 if (! float_mode_p)
1408 {
1409 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1410 return false;
1411 }
1412 /* FALLTHRU */
1413
1414 case FLOAT:
1415 case UNSIGNED_FLOAT:
1416 case FIX:
1417 case UNSIGNED_FIX:
1418 case FLOAT_TRUNCATE:
1419 *total = cost_data->fp_add;
1420 return false;
1421
1422 case FLOAT_EXTEND:
1423 if (MEM_P (XEXP (x, 0)))
1424 *total = 0;
1425 else
1426 *total = cost_data->fp_add;
1427 return false;
1428
1429 default:
1430 return false;
1431 }
1432 }
1433 \f
1434 /* REF is an alignable memory location. Place an aligned SImode
1435 reference into *PALIGNED_MEM and the number of bits to shift into
1436    *PBITNUM.  */
1438
1439 void
1440 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1441 {
1442 rtx base;
1443 HOST_WIDE_INT disp, offset;
1444
1445 gcc_assert (MEM_P (ref));
1446
1447 if (reload_in_progress
1448 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1449 {
1450 base = find_replacement (&XEXP (ref, 0));
1451 gcc_assert (memory_address_p (GET_MODE (ref), base));
1452 }
1453 else
1454 base = XEXP (ref, 0);
1455
1456 if (GET_CODE (base) == PLUS)
1457 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1458 else
1459 disp = 0;
1460
1461 /* Find the byte offset within an aligned word. If the memory itself is
1462 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1463 will have examined the base register and determined it is aligned, and
1464 thus displacements from it are naturally alignable. */
1465 if (MEM_ALIGN (ref) >= 32)
1466 offset = 0;
1467 else
1468 offset = disp & 3;
1469
1470 /* Access the entire aligned word. */
1471 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1472
1473 /* Convert the byte offset within the word to a bit offset. */
1474 if (WORDS_BIG_ENDIAN)
1475 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1476 else
1477 offset *= 8;
1478 *pbitnum = GEN_INT (offset);
1479 }
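
/* Worked example (editorial addition, not original GCC text): for an
   HImode reference at (plus $1 6) that is not known to be 32-bit
   aligned, offset == (6 & 3) == 2, so *PALIGNED_MEM is the SImode word
   at (plus $1 4) and *PBITNUM is 16 in the usual little-endian
   configuration.  */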
1480
1481 /* Similar, but just get the address.  Handle the two reload cases.  */
1483
1484 rtx
1485 get_unaligned_address (rtx ref)
1486 {
1487 rtx base;
1488 HOST_WIDE_INT offset = 0;
1489
1490 gcc_assert (MEM_P (ref));
1491
1492 if (reload_in_progress
1493 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1494 {
1495 base = find_replacement (&XEXP (ref, 0));
1496
1497 gcc_assert (memory_address_p (GET_MODE (ref), base));
1498 }
1499 else
1500 base = XEXP (ref, 0);
1501
1502 if (GET_CODE (base) == PLUS)
1503 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1504
1505 return plus_constant (base, offset);
1506 }
1507
1508 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1509 X is always returned in a register. */
1510
1511 rtx
1512 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1513 {
1514 if (GET_CODE (addr) == PLUS)
1515 {
1516 ofs += INTVAL (XEXP (addr, 1));
1517 addr = XEXP (addr, 0);
1518 }
1519
1520 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1522 }
1523
1524 /* On the Alpha, all (non-symbolic) constants except zero go into
1525 a floating-point register via memory. Note that we cannot
1526 return anything that is not a subset of RCLASS, and that some
1527 symbolic constants cannot be dropped to memory. */
1528
1529 enum reg_class
1530 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1531 {
1532 /* Zero is present in any register class. */
1533 if (x == CONST0_RTX (GET_MODE (x)))
1534 return rclass;
1535
1536 /* These sorts of constants we can easily drop to memory. */
1537 if (CONST_INT_P (x)
1538 || GET_CODE (x) == CONST_DOUBLE
1539 || GET_CODE (x) == CONST_VECTOR)
1540 {
1541 if (rclass == FLOAT_REGS)
1542 return NO_REGS;
1543 if (rclass == ALL_REGS)
1544 return GENERAL_REGS;
1545 return rclass;
1546 }
1547
1548 /* All other kinds of constants should not (and in the case of HIGH
1549 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1550 secondary reload. */
1551 if (CONSTANT_P (x))
1552 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1553
1554 return rclass;
1555 }
1556
1557 /* Inform reload about cases where moving X with a mode MODE to a register in
1558 RCLASS requires an extra scratch or immediate register. Return the class
1559 needed for the immediate register. */
1560
1561 static enum reg_class
1562 alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
1563 enum machine_mode mode, secondary_reload_info *sri)
1564 {
1565 /* Loading and storing HImode or QImode values to and from memory
1566 usually requires a scratch register. */
1567 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1568 {
1569 if (any_memory_operand (x, mode))
1570 {
1571 if (in_p)
1572 {
1573 if (!aligned_memory_operand (x, mode))
1574 sri->icode = reload_in_optab[mode];
1575 }
1576 else
1577 sri->icode = reload_out_optab[mode];
1578 return NO_REGS;
1579 }
1580 }
1581
1582 /* We also cannot do integral arithmetic into FP regs, as might result
1583 from register elimination into a DImode fp register. */
1584 if (rclass == FLOAT_REGS)
1585 {
1586 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1587 return GENERAL_REGS;
1588 if (in_p && INTEGRAL_MODE_P (mode)
1589 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1590 return GENERAL_REGS;
1591 }
1592
1593 return NO_REGS;
1594 }
1595 \f
1596 /* Subfunction of the following function. Update the flags of any MEM
1597 found in part of X. */
1598
1599 static int
1600 alpha_set_memflags_1 (rtx *xp, void *data)
1601 {
1602 rtx x = *xp, orig = (rtx) data;
1603
1604 if (!MEM_P (x))
1605 return 0;
1606
1607 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1608 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1609 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1610 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1611 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1612
1613 /* Sadly, we cannot use alias sets because the extra aliasing
1614 produced by the AND interferes. Given that two-byte quantities
1615 are the only thing we would be able to differentiate anyway,
1616 there does not seem to be any point in convoluting the early
1617 out of the alias check. */
1618
1619 return -1;
1620 }
1621
1622 /* Given SEQ, which is an INSN list, look for any MEMs in either
1623    a SET_DEST or a SET_SRC and copy the in-struct, volatile, scalar,
1624    notrap, and readonly flags from REF into each of the MEMs found.
1625    If REF is not a MEM, don't do anything.  */
1626
1627 void
1628 alpha_set_memflags (rtx seq, rtx ref)
1629 {
1630 rtx insn;
1631
1632 if (!MEM_P (ref))
1633 return;
1634
1635 /* This is only called from alpha.md, after having had something
1636 generated from one of the insn patterns. So if everything is
1637 zero, the pattern is already up-to-date. */
1638 if (!MEM_VOLATILE_P (ref)
1639 && !MEM_IN_STRUCT_P (ref)
1640 && !MEM_SCALAR_P (ref)
1641 && !MEM_NOTRAP_P (ref)
1642 && !MEM_READONLY_P (ref))
1643 return;
1644
1645 for (insn = seq; insn; insn = NEXT_INSN (insn))
1646 if (INSN_P (insn))
1647 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1648 else
1649 gcc_unreachable ();
1650 }
1651 \f
1652 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1653 int, bool);
1654
1655 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1656 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1657 and return pc_rtx if successful. */
1658
1659 static rtx
1660 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1661 HOST_WIDE_INT c, int n, bool no_output)
1662 {
1663 HOST_WIDE_INT new_const;
1664 int i, bits;
1665 /* Use a pseudo if highly optimizing and still generating RTL. */
1666 rtx subtarget
1667 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1668 rtx temp, insn;
1669
1670 /* If this is a sign-extended 32-bit constant, we can do this in at most
1671 three insns, so do it if we have enough insns left. We always have
1672 a sign-extended 32-bit constant when compiling on a narrow machine. */
1673
1674 if (HOST_BITS_PER_WIDE_INT != 64
1675 || c >> 31 == -1 || c >> 31 == 0)
1676 {
1677 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1678 HOST_WIDE_INT tmp1 = c - low;
1679 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1680 HOST_WIDE_INT extra = 0;
1681
1682 /* If HIGH will be interpreted as negative but the constant is
1683          positive, we must adjust it to do two ldah insns.  */
1684
1685 if ((high & 0x8000) != 0 && c >= 0)
1686 {
1687 extra = 0x4000;
1688 tmp1 -= 0x40000000;
1689 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1690 }
1691
1692 if (c == low || (low == 0 && extra == 0))
1693 {
1694 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1695 but that meant that we can't handle INT_MIN on 32-bit machines
1696 (like NT/Alpha), because we recurse indefinitely through
1697 emit_move_insn to gen_movdi. So instead, since we know exactly
1698 what we want, create it explicitly. */
1699
1700 if (no_output)
1701 return pc_rtx;
1702 if (target == NULL)
1703 target = gen_reg_rtx (mode);
1704 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1705 return target;
1706 }
1707 else if (n >= 2 + (extra != 0))
1708 {
1709 if (no_output)
1710 return pc_rtx;
1711 if (!can_create_pseudo_p ())
1712 {
1713 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1714 temp = target;
1715 }
1716 else
1717 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1718 subtarget, mode);
1719
1720 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1721 This means that if we go through expand_binop, we'll try to
1722 generate extensions, etc, which will require new pseudos, which
1723 will fail during some split phases. The SImode add patterns
1724 still exist, but are not named. So build the insns by hand. */
1725
1726 if (extra != 0)
1727 {
1728 if (! subtarget)
1729 subtarget = gen_reg_rtx (mode);
1730 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1731 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1732 emit_insn (insn);
1733 temp = subtarget;
1734 }
1735
1736 if (target == NULL)
1737 target = gen_reg_rtx (mode);
1738 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1739 insn = gen_rtx_SET (VOIDmode, target, insn);
1740 emit_insn (insn);
1741 return target;
1742 }
1743 }
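
/* Worked example (editorial addition, not original GCC text) for the
   sign-extended 32-bit path above, with c == 0x7fff8000: low == -0x8000
   and high initially == -0x8000, so EXTRA becomes 0x4000 and high is
   recomputed as 0x4000.  The constant is then built as
   (0x4000 << 16) + (0x4000 << 16) + (-0x8000), i.e. two ldah insns
   followed by one lda.  */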
1744
1745 /* If we couldn't do it that way, try some other methods. But if we have
1746 no instructions left, don't bother. Likewise, if this is SImode and
1747 we can't make pseudos, we can't do anything since the expand_binop
1748 and expand_unop calls will widen and try to make pseudos. */
1749
1750 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1751 return 0;
1752
1753 /* Next, see if we can load a related constant and then shift and possibly
1754 negate it to get the constant we want. Try this once each increasing
1755 numbers of insns. */
1756
1757 for (i = 1; i < n; i++)
1758 {
1759       /* First, see if, minus some low bits, we have an easy load of the
1760          high bits.  */
1761
1762 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1763 if (new_const != 0)
1764 {
1765 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1766 if (temp)
1767 {
1768 if (no_output)
1769 return temp;
1770 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1771 target, 0, OPTAB_WIDEN);
1772 }
1773 }
1774
1775 /* Next try complementing. */
1776 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1777 if (temp)
1778 {
1779 if (no_output)
1780 return temp;
1781 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1782 }
1783
1784 /* Next try to form a constant and do a left shift. We can do this
1785 if some low-order bits are zero; the exact_log2 call below tells
1786 us that information. The bits we are shifting out could be any
1787 value, but here we'll just try the 0- and sign-extended forms of
1788 the constant. To try to increase the chance of having the same
1789 constant in more than one insn, start at the highest number of
1790 bits to shift, but try all possibilities in case a ZAPNOT will
1791 be useful. */
1792
1793 bits = exact_log2 (c & -c);
1794 if (bits > 0)
1795 for (; bits > 0; bits--)
1796 {
1797 new_const = c >> bits;
1798 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1799 if (!temp && c < 0)
1800 {
1801 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1802 temp = alpha_emit_set_const (subtarget, mode, new_const,
1803 i, no_output);
1804 }
1805 if (temp)
1806 {
1807 if (no_output)
1808 return temp;
1809 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1810 target, 0, OPTAB_WIDEN);
1811 }
1812 }
1813
1814 /* Now try high-order zero bits. Here we try the shifted-in bits as
1815 all zero and all ones. Be careful to avoid shifting outside the
1816 mode and to avoid shifting outside the host wide int size. */
1817 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1818 confuse the recursive call and set all of the high 32 bits. */
1819
1820 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1821 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1822 if (bits > 0)
1823 for (; bits > 0; bits--)
1824 {
1825 new_const = c << bits;
1826 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1827 if (!temp)
1828 {
1829 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1830 temp = alpha_emit_set_const (subtarget, mode, new_const,
1831 i, no_output);
1832 }
1833 if (temp)
1834 {
1835 if (no_output)
1836 return temp;
1837 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1838 target, 1, OPTAB_WIDEN);
1839 }
1840 }
1841
1842 /* Now try high-order 1 bits. We get that with a sign-extension.
1843 But one bit isn't enough here. Be careful to avoid shifting outside
1844 the mode and to avoid shifting outside the host wide int size. */
1845
1846 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1847 - floor_log2 (~ c) - 2);
1848 if (bits > 0)
1849 for (; bits > 0; bits--)
1850 {
1851 new_const = c << bits;
1852 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1853 if (!temp)
1854 {
1855 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1856 temp = alpha_emit_set_const (subtarget, mode, new_const,
1857 i, no_output);
1858 }
1859 if (temp)
1860 {
1861 if (no_output)
1862 return temp;
1863 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1864 target, 0, OPTAB_WIDEN);
1865 }
1866 }
1867 }
1868
1869 #if HOST_BITS_PER_WIDE_INT == 64
1870   /* Finally, see if we can load a value into the target that is the same as the
1871 constant except that all bytes that are 0 are changed to be 0xff. If we
1872 can, then we can do a ZAPNOT to obtain the desired constant. */
1873
1874 new_const = c;
1875 for (i = 0; i < 64; i += 8)
1876 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1877 new_const |= (HOST_WIDE_INT) 0xff << i;
1878
1879 /* We are only called for SImode and DImode. If this is SImode, ensure that
1880 we are sign extended to a full word. */
1881
1882 if (mode == SImode)
1883 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1884
1885 if (new_const != c)
1886 {
1887 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1888 if (temp)
1889 {
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1893 target, 0, OPTAB_WIDEN);
1894 }
1895 }
1896 #endif
1897
1898 return 0;
1899 }
1900
1901 /* Try to output insns to set TARGET equal to the constant C if it can be
1902 done in N insns or fewer. Do all computations in MODE. Returns the place
1903 where the output has been placed if it can be done and the insns have been
1904 emitted. If it would take more than N insns, zero is returned and no
1905 insns are emitted. */
1906
1907 static rtx
1908 alpha_emit_set_const (rtx target, enum machine_mode mode,
1909 HOST_WIDE_INT c, int n, bool no_output)
1910 {
1911 enum machine_mode orig_mode = mode;
1912 rtx orig_target = target;
1913 rtx result = 0;
1914 int i;
1915
1916 /* If we can't make any pseudos and TARGET is an SImode hard register, then
1917 if we can't load this constant in one insn, do this in DImode. */
1918 if (!can_create_pseudo_p () && mode == SImode
1919 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1920 {
1921 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1922 if (result)
1923 return result;
1924
1925 target = no_output ? NULL : gen_lowpart (DImode, target);
1926 mode = DImode;
1927 }
1928 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1929 {
1930 target = no_output ? NULL : gen_lowpart (DImode, target);
1931 mode = DImode;
1932 }
1933
1934 /* Try 1 insn, then 2, then up to N. */
1935 for (i = 1; i <= n; i++)
1936 {
1937 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1938 if (result)
1939 {
1940 rtx insn, set;
1941
1942 if (no_output)
1943 return result;
1944
1945 insn = get_last_insn ();
1946 set = single_set (insn);
1947 if (! CONSTANT_P (SET_SRC (set)))
1948 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1949 break;
1950 }
1951 }
1952
1953 /* Allow for the case where we changed the mode of TARGET. */
1954 if (result)
1955 {
1956 if (result == target)
1957 result = orig_target;
1958 else if (mode != orig_mode)
1959 result = gen_lowpart (orig_mode, result);
1960 }
1961
1962 return result;
1963 }
1964
1965 /* Having failed to find a 3-insn sequence in alpha_emit_set_const,
1966 fall back to a straightforward decomposition. We do this to avoid
1967 exponential run times encountered when looking for longer sequences
1968 with alpha_emit_set_const. */
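/* A worked illustration of the decomposition below: for
   c = 0x123456789abcdef0 we get d4 = 0x12340000, d3 = 0x5679,
   d2 = -0x65430000 and d1 = -0x2110, i.e. an ldah/lda pair building
   0x12345679, a shift left by 32, and a final ldah/lda pair adding in
   the low 32 bits.  */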
1969
1970 static rtx
1971 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1972 {
1973 HOST_WIDE_INT d1, d2, d3, d4;
1974
1975 /* Decompose the entire word */
1976 #if HOST_BITS_PER_WIDE_INT >= 64
1977 gcc_assert (c2 == -(c1 < 0));
1978 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1979 c1 -= d1;
1980 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1981 c1 = (c1 - d2) >> 32;
1982 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1983 c1 -= d3;
1984 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1985 gcc_assert (c1 == d4);
1986 #else
1987 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1988 c1 -= d1;
1989 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1990 gcc_assert (c1 == d2);
1991 c2 += (d2 < 0);
1992 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1993 c2 -= d3;
1994 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1995 gcc_assert (c2 == d4);
1996 #endif
1997
1998 /* Construct the high word */
1999 if (d4)
2000 {
2001 emit_move_insn (target, GEN_INT (d4));
2002 if (d3)
2003 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2004 }
2005 else
2006 emit_move_insn (target, GEN_INT (d3));
2007
2008 /* Shift it into place */
2009 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2010
2011 /* Add in the low bits. */
2012 if (d2)
2013 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2014 if (d1)
2015 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2016
2017 return target;
2018 }
2019
2020 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2021 the low 64 bits. */
2022
2023 static void
2024 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2025 {
2026 HOST_WIDE_INT i0, i1;
2027
2028 if (GET_CODE (x) == CONST_VECTOR)
2029 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2030
2031
2032 if (CONST_INT_P (x))
2033 {
2034 i0 = INTVAL (x);
2035 i1 = -(i0 < 0);
2036 }
2037 else if (HOST_BITS_PER_WIDE_INT >= 64)
2038 {
2039 i0 = CONST_DOUBLE_LOW (x);
2040 i1 = -(i0 < 0);
2041 }
2042 else
2043 {
2044 i0 = CONST_DOUBLE_LOW (x);
2045 i1 = CONST_DOUBLE_HIGH (x);
2046 }
2047
2048 *p0 = i0;
2049 *p1 = i1;
2050 }
2051
2052 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2053 are willing to load the value into a register via a move pattern.
2054 Normally this is all symbolic constants, integral constants that
2055 take three or fewer instructions, and floating-point zero. */
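/* For instance, 0x12345678 is legitimate since an ldah/lda pair builds
   it in two insns, while an arbitrary 64-bit constant needing more than
   three insns is rejected here unless TARGET_BUILD_CONSTANTS is in
   effect.  */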
2056
2057 bool
2058 alpha_legitimate_constant_p (rtx x)
2059 {
2060 enum machine_mode mode = GET_MODE (x);
2061 HOST_WIDE_INT i0, i1;
2062
2063 switch (GET_CODE (x))
2064 {
2065 case LABEL_REF:
2066 case HIGH:
2067 return true;
2068
2069 case CONST:
2070 if (GET_CODE (XEXP (x, 0)) == PLUS
2071 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2072 x = XEXP (XEXP (x, 0), 0);
2073 else
2074 return true;
2075
2076 if (GET_CODE (x) != SYMBOL_REF)
2077 return true;
2078
2079 /* FALLTHRU */
2080
2081 case SYMBOL_REF:
2082 /* TLS symbols are never valid. */
2083 return SYMBOL_REF_TLS_MODEL (x) == 0;
2084
2085 case CONST_DOUBLE:
2086 if (x == CONST0_RTX (mode))
2087 return true;
2088 if (FLOAT_MODE_P (mode))
2089 return false;
2090 goto do_integer;
2091
2092 case CONST_VECTOR:
2093 if (x == CONST0_RTX (mode))
2094 return true;
2095 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2096 return false;
2097 if (GET_MODE_SIZE (mode) != 8)
2098 return false;
2099 goto do_integer;
2100
2101 case CONST_INT:
2102 do_integer:
2103 if (TARGET_BUILD_CONSTANTS)
2104 return true;
2105 alpha_extract_integer (x, &i0, &i1);
2106 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2107 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2108 return false;
2109
2110 default:
2111 return false;
2112 }
2113 }
2114
2115 /* Operand 1 is known to be a constant, and should require more than one
2116 instruction to load. Emit that multi-part load. */
2117
2118 bool
2119 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2120 {
2121 HOST_WIDE_INT i0, i1;
2122 rtx temp = NULL_RTX;
2123
2124 alpha_extract_integer (operands[1], &i0, &i1);
2125
2126 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2127 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2128
2129 if (!temp && TARGET_BUILD_CONSTANTS)
2130 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2131
2132 if (temp)
2133 {
2134 if (!rtx_equal_p (operands[0], temp))
2135 emit_move_insn (operands[0], temp);
2136 return true;
2137 }
2138
2139 return false;
2140 }
2141
2142 /* Expand a move instruction; return true if all work is done.
2143 We don't handle non-bwx subword loads here. */
2144
2145 bool
2146 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2147 {
2148 rtx tmp;
2149
2150 /* If the output is not a register, the input must be. */
2151 if (MEM_P (operands[0])
2152 && ! reg_or_0_operand (operands[1], mode))
2153 operands[1] = force_reg (mode, operands[1]);
2154
2155 /* Allow legitimize_address to perform some simplifications. */
2156 if (mode == Pmode && symbolic_operand (operands[1], mode))
2157 {
2158 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2159 if (tmp)
2160 {
2161 if (tmp == operands[0])
2162 return true;
2163 operands[1] = tmp;
2164 return false;
2165 }
2166 }
2167
2168 /* Early out for non-constants and valid constants. */
2169 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2170 return false;
2171
2172 /* Split large integers. */
2173 if (CONST_INT_P (operands[1])
2174 || GET_CODE (operands[1]) == CONST_DOUBLE
2175 || GET_CODE (operands[1]) == CONST_VECTOR)
2176 {
2177 if (alpha_split_const_mov (mode, operands))
2178 return true;
2179 }
2180
2181 /* Otherwise we've nothing left but to drop the thing to memory. */
2182 tmp = force_const_mem (mode, operands[1]);
2183
2184 if (tmp == NULL_RTX)
2185 return false;
2186
2187 if (reload_in_progress)
2188 {
2189 emit_move_insn (operands[0], XEXP (tmp, 0));
2190 operands[1] = replace_equiv_address (tmp, operands[0]);
2191 }
2192 else
2193 operands[1] = validize_mem (tmp);
2194 return false;
2195 }
2196
2197 /* Expand a non-bwx QImode or HImode move instruction;
2198 return true if all work is done. */
2199
2200 bool
2201 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2202 {
2203 rtx seq;
2204
2205 /* If the output is not a register, the input must be. */
2206 if (MEM_P (operands[0]))
2207 operands[1] = force_reg (mode, operands[1]);
2208
2209 /* Handle four memory cases, unaligned and aligned for either the input
2210 or the output. The only case where we can be called during reload is
2211 for aligned loads; all other cases require temporaries. */
2212
2213 if (any_memory_operand (operands[1], mode))
2214 {
2215 if (aligned_memory_operand (operands[1], mode))
2216 {
2217 if (reload_in_progress)
2218 {
2219 if (mode == QImode)
2220 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2221 else
2222 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2223 emit_insn (seq);
2224 }
2225 else
2226 {
2227 rtx aligned_mem, bitnum;
2228 rtx scratch = gen_reg_rtx (SImode);
2229 rtx subtarget;
2230 bool copyout;
2231
2232 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2233
2234 subtarget = operands[0];
2235 if (REG_P (subtarget))
2236 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2237 else
2238 subtarget = gen_reg_rtx (DImode), copyout = true;
2239
2240 if (mode == QImode)
2241 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2242 bitnum, scratch);
2243 else
2244 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2245 bitnum, scratch);
2246 emit_insn (seq);
2247
2248 if (copyout)
2249 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2250 }
2251 }
2252 else
2253 {
2254 /* Don't pass these as parameters since that makes the generated
2255 code depend on parameter evaluation order which will cause
2256 bootstrap failures. */
2257
2258 rtx temp1, temp2, subtarget, ua;
2259 bool copyout;
2260
2261 temp1 = gen_reg_rtx (DImode);
2262 temp2 = gen_reg_rtx (DImode);
2263
2264 subtarget = operands[0];
2265 if (REG_P (subtarget))
2266 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2267 else
2268 subtarget = gen_reg_rtx (DImode), copyout = true;
2269
2270 ua = get_unaligned_address (operands[1]);
2271 if (mode == QImode)
2272 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2273 else
2274 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2275
2276 alpha_set_memflags (seq, operands[1]);
2277 emit_insn (seq);
2278
2279 if (copyout)
2280 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2281 }
2282 return true;
2283 }
2284
2285 if (any_memory_operand (operands[0], mode))
2286 {
2287 if (aligned_memory_operand (operands[0], mode))
2288 {
2289 rtx aligned_mem, bitnum;
2290 rtx temp1 = gen_reg_rtx (SImode);
2291 rtx temp2 = gen_reg_rtx (SImode);
2292
2293 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2294
2295 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2296 temp1, temp2));
2297 }
2298 else
2299 {
2300 rtx temp1 = gen_reg_rtx (DImode);
2301 rtx temp2 = gen_reg_rtx (DImode);
2302 rtx temp3 = gen_reg_rtx (DImode);
2303 rtx ua = get_unaligned_address (operands[0]);
2304
2305 if (mode == QImode)
2306 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2307 else
2308 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2309
2310 alpha_set_memflags (seq, operands[0]);
2311 emit_insn (seq);
2312 }
2313 return true;
2314 }
2315
2316 return false;
2317 }
2318
2319 /* Implement the movmisalign patterns. One of the operands is a memory
2320 that is not naturally aligned. Emit instructions to load it. */
2321
2322 void
2323 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2324 {
2325 /* Honor misaligned loads, since those are the ones we promised to handle. */
2326 if (MEM_P (operands[1]))
2327 {
2328 rtx tmp;
2329
2330 if (register_operand (operands[0], mode))
2331 tmp = operands[0];
2332 else
2333 tmp = gen_reg_rtx (mode);
2334
2335 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2336 if (tmp != operands[0])
2337 emit_move_insn (operands[0], tmp);
2338 }
2339 else if (MEM_P (operands[0]))
2340 {
2341 if (!reg_or_0_operand (operands[1], mode))
2342 operands[1] = force_reg (mode, operands[1]);
2343 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2344 }
2345 else
2346 gcc_unreachable ();
2347 }
2348
2349 /* Generate an unsigned DImode to FP conversion. This is the same code
2350 optabs would emit if we didn't have TFmode patterns.
2351
2352 For SFmode, this is the only construction I've found that can pass
2353 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2354 intermediates will work, because you'll get intermediate rounding
2355 that ruins the end result. Some of this could be fixed by turning
2356 on round-to-positive-infinity, but that requires diddling the fpsr,
2357 which kills performance. I tried turning this around and converting
2358 to a negative number, so that I could turn on /m, but either I did
2359 it wrong or there's something else, because I wound up with the exact
2360 same single-bit error. There is a branch-less form of this same code:
2361
2362 srl $16,1,$1
2363 and $16,1,$2
2364 cmplt $16,0,$3
2365 or $1,$2,$2
2366 cmovge $16,$16,$2
2367 itoft $3,$f10
2368 itoft $2,$f11
2369 cvtqs $f11,$f11
2370 adds $f11,$f11,$f0
2371 fcmoveq $f10,$f11,$f0
2372
2373 I'm not using it because it's the same number of instructions as
2374 this branch-full form, and it has more serialized long latency
2375 instructions on the critical path.
2376
2377 For DFmode, we can avoid rounding errors by breaking up the word
2378 into two pieces, converting them separately, and adding them back:
2379
2380 LC0: .long 0,0x5f800000
2381
2382 itoft $16,$f11
2383 lda $2,LC0
2384 cmplt $16,0,$1
2385 cpyse $f11,$f31,$f10
2386 cpyse $f31,$f11,$f11
2387 s4addq $1,$2,$1
2388 lds $f12,0($1)
2389 cvtqt $f10,$f10
2390 cvtqt $f11,$f11
2391 addt $f12,$f10,$f0
2392 addt $f0,$f11,$f0
2393
2394 This doesn't seem to be a clear-cut win over the optabs form.
2395 It probably all depends on the distribution of numbers being
2396 converted -- in the optabs form, everything but the high-bit-set case has a
2397 much lower minimum execution time. */
2398
2399 void
2400 alpha_emit_floatuns (rtx operands[2])
2401 {
2402 rtx neglab, donelab, i0, i1, f0, in, out;
2403 enum machine_mode mode;
2404
2405 out = operands[0];
2406 in = force_reg (DImode, operands[1]);
2407 mode = GET_MODE (out);
2408 neglab = gen_label_rtx ();
2409 donelab = gen_label_rtx ();
2410 i0 = gen_reg_rtx (DImode);
2411 i1 = gen_reg_rtx (DImode);
2412 f0 = gen_reg_rtx (mode);
2413
2414 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2415
2416 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2417 emit_jump_insn (gen_jump (donelab));
2418 emit_barrier ();
2419
2420 emit_label (neglab);
2421
2422 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2423 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2424 emit_insn (gen_iordi3 (i0, i0, i1));
2425 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2426 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2427
2428 emit_label (donelab);
2429 }
2430
2431 /* Generate the comparison for a conditional branch. */
2432
2433 void
2434 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2435 {
2436 enum rtx_code cmp_code, branch_code;
2437 enum machine_mode branch_mode = VOIDmode;
2438 enum rtx_code code = GET_CODE (operands[0]);
2439 rtx op0 = operands[1], op1 = operands[2];
2440 rtx tem;
2441
2442 if (cmp_mode == TFmode)
2443 {
2444 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2445 op1 = const0_rtx;
2446 cmp_mode = DImode;
2447 }
2448
2449 /* The general case: fold the comparison code to the types of compares
2450 that we have, choosing the branch as necessary. */
2451 switch (code)
2452 {
2453 case EQ: case LE: case LT: case LEU: case LTU:
2454 case UNORDERED:
2455 /* We have these compares: */
2456 cmp_code = code, branch_code = NE;
2457 break;
2458
2459 case NE:
2460 case ORDERED:
2461 /* These must be reversed. */
2462 cmp_code = reverse_condition (code), branch_code = EQ;
2463 break;
2464
2465 case GE: case GT: case GEU: case GTU:
2466 /* For FP, we swap them, for INT, we reverse them. */
2467 if (cmp_mode == DFmode)
2468 {
2469 cmp_code = swap_condition (code);
2470 branch_code = NE;
2471 tem = op0, op0 = op1, op1 = tem;
2472 }
2473 else
2474 {
2475 cmp_code = reverse_condition (code);
2476 branch_code = EQ;
2477 }
2478 break;
2479
2480 default:
2481 gcc_unreachable ();
2482 }
2483
2484 if (cmp_mode == DFmode)
2485 {
2486 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2487 {
2488 /* When we are not as concerned about non-finite values, and we
2489 are comparing against zero, we can branch directly. */
2490 if (op1 == CONST0_RTX (DFmode))
2491 cmp_code = UNKNOWN, branch_code = code;
2492 else if (op0 == CONST0_RTX (DFmode))
2493 {
2494 /* Undo the swap we probably did just above. */
2495 tem = op0, op0 = op1, op1 = tem;
2496 branch_code = swap_condition (cmp_code);
2497 cmp_code = UNKNOWN;
2498 }
2499 }
2500 else
2501 {
2502 /* ??? We mark the branch mode to be CCmode to prevent the
2503 compare and branch from being combined, since the compare
2504 insn follows IEEE rules that the branch does not. */
2505 branch_mode = CCmode;
2506 }
2507 }
2508 else
2509 {
2510 /* The following optimizations are only for signed compares. */
2511 if (code != LEU && code != LTU && code != GEU && code != GTU)
2512 {
2513 /* Whee. Compare and branch against 0 directly. */
2514 if (op1 == const0_rtx)
2515 cmp_code = UNKNOWN, branch_code = code;
2516
2517 /* If the constant doesn't fit into an immediate, but can
2518 be generated by lda/ldah, we adjust the argument and
2519 compare against zero, so we can use beq/bne directly. */
2520 /* ??? Don't do this when comparing against symbols, otherwise
2521 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2522 be declared false out of hand (at least for non-weak). */
2523 else if (CONST_INT_P (op1)
2524 && (code == EQ || code == NE)
2525 && !(symbolic_operand (op0, VOIDmode)
2526 || (REG_P (op0) && REG_POINTER (op0))))
2527 {
2528 rtx n_op1 = GEN_INT (-INTVAL (op1));
2529
2530 if (! satisfies_constraint_I (op1)
2531 && (satisfies_constraint_K (n_op1)
2532 || satisfies_constraint_L (n_op1)))
2533 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2534 }
2535 }
2536
2537 if (!reg_or_0_operand (op0, DImode))
2538 op0 = force_reg (DImode, op0);
2539 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2540 op1 = force_reg (DImode, op1);
2541 }
2542
2543 /* Emit an initial compare instruction, if necessary. */
2544 tem = op0;
2545 if (cmp_code != UNKNOWN)
2546 {
2547 tem = gen_reg_rtx (cmp_mode);
2548 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2549 }
2550
2551 /* Emit the branch instruction. */
2552 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2553 gen_rtx_IF_THEN_ELSE (VOIDmode,
2554 gen_rtx_fmt_ee (branch_code,
2555 branch_mode, tem,
2556 CONST0_RTX (cmp_mode)),
2557 gen_rtx_LABEL_REF (VOIDmode,
2558 operands[3]),
2559 pc_rtx));
2560 emit_jump_insn (tem);
2561 }
2562
2563 /* Certain simplifications can be done to make invalid setcc operations
2564 valid. Emit the final setcc and return true, or return false if we can't work. */
2565
2566 bool
2567 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2568 {
2569 enum rtx_code cmp_code;
2570 enum rtx_code code = GET_CODE (operands[1]);
2571 rtx op0 = operands[2], op1 = operands[3];
2572 rtx tmp;
2573
2574 if (cmp_mode == TFmode)
2575 {
2576 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2577 op1 = const0_rtx;
2578 cmp_mode = DImode;
2579 }
2580
2581 if (cmp_mode == DFmode && !TARGET_FIX)
2582 return 0;
2583
2584 /* The general case: fold the comparison code to the types of compares
2585 that we have, choosing the branch as necessary. */
2586
2587 cmp_code = UNKNOWN;
2588 switch (code)
2589 {
2590 case EQ: case LE: case LT: case LEU: case LTU:
2591 case UNORDERED:
2592 /* We have these compares. */
2593 if (cmp_mode == DFmode)
2594 cmp_code = code, code = NE;
2595 break;
2596
2597 case NE:
2598 if (cmp_mode == DImode && op1 == const0_rtx)
2599 break;
2600 /* FALLTHRU */
2601
2602 case ORDERED:
2603 cmp_code = reverse_condition (code);
2604 code = EQ;
2605 break;
2606
2607 case GE: case GT: case GEU: case GTU:
2608 /* These normally need swapping, but for integer zero we have
2609 special patterns that recognize swapped operands. */
2610 if (cmp_mode == DImode && op1 == const0_rtx)
2611 break;
2612 code = swap_condition (code);
2613 if (cmp_mode == DFmode)
2614 cmp_code = code, code = NE;
2615 tmp = op0, op0 = op1, op1 = tmp;
2616 break;
2617
2618 default:
2619 gcc_unreachable ();
2620 }
2621
2622 if (cmp_mode == DImode)
2623 {
2624 if (!register_operand (op0, DImode))
2625 op0 = force_reg (DImode, op0);
2626 if (!reg_or_8bit_operand (op1, DImode))
2627 op1 = force_reg (DImode, op1);
2628 }
2629
2630 /* Emit an initial compare instruction, if necessary. */
2631 if (cmp_code != UNKNOWN)
2632 {
2633 tmp = gen_reg_rtx (cmp_mode);
2634 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2635 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2636
2637 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2638 op1 = const0_rtx;
2639 }
2640
2641 /* Emit the setcc instruction. */
2642 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2643 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2644 return true;
2645 }
2646
2647
2648 /* Rewrite a comparison against zero CMP of the form
2649 (CODE (cc0) (const_int 0)) so it can be written validly in
2650 a conditional move (if_then_else CMP ...).
2651 If both of the operands that set cc0 are nonzero we must emit
2652 an insn to perform the compare (it can't be done within
2653 the conditional move). */
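/* For example, for (x < y ? a : b) with neither operand zero we emit a
   cmplt into a temporary and hand back (ne temp 0) for the conditional
   move; when one side of the comparison is already zero, the comparison
   itself can go straight into the conditional move.  */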
2654
2655 rtx
2656 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2657 {
2658 enum rtx_code code = GET_CODE (cmp);
2659 enum rtx_code cmov_code = NE;
2660 rtx op0 = XEXP (cmp, 0);
2661 rtx op1 = XEXP (cmp, 1);
2662 enum machine_mode cmp_mode
2663 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2664 enum machine_mode cmov_mode = VOIDmode;
2665 int local_fast_math = flag_unsafe_math_optimizations;
2666 rtx tem;
2667
2668 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2669
2670 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2671 {
2672 enum rtx_code cmp_code;
2673
2674 if (! TARGET_FIX)
2675 return 0;
2676
2677 /* If we have fp<->int register move instructions, do a cmov by
2678 performing the comparison in fp registers, and move the
2679 zero/nonzero value to integer registers, where we can then
2680 use a normal cmov, or vice-versa. */
2681
2682 switch (code)
2683 {
2684 case EQ: case LE: case LT: case LEU: case LTU:
2685 /* We have these compares. */
2686 cmp_code = code, code = NE;
2687 break;
2688
2689 case NE:
2690 /* This must be reversed. */
2691 cmp_code = EQ, code = EQ;
2692 break;
2693
2694 case GE: case GT: case GEU: case GTU:
2695 /* These normally need swapping, but for integer zero we have
2696 special patterns that recognize swapped operands. */
2697 if (cmp_mode == DImode && op1 == const0_rtx)
2698 cmp_code = code, code = NE;
2699 else
2700 {
2701 cmp_code = swap_condition (code);
2702 code = NE;
2703 tem = op0, op0 = op1, op1 = tem;
2704 }
2705 break;
2706
2707 default:
2708 gcc_unreachable ();
2709 }
2710
2711 tem = gen_reg_rtx (cmp_mode);
2712 emit_insn (gen_rtx_SET (VOIDmode, tem,
2713 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2714 op0, op1)));
2715
2716 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2717 op0 = gen_lowpart (cmp_mode, tem);
2718 op1 = CONST0_RTX (cmp_mode);
2719 local_fast_math = 1;
2720 }
2721
2722 /* We may be able to use a conditional move directly.
2723 This avoids emitting spurious compares. */
2724 if (signed_comparison_operator (cmp, VOIDmode)
2725 && (cmp_mode == DImode || local_fast_math)
2726 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2727 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2728
2729 /* We can't put the comparison inside the conditional move;
2730 emit a compare instruction and put that inside the
2731 conditional move. Make sure we emit only comparisons we have;
2732 swap or reverse as necessary. */
2733
2734 if (!can_create_pseudo_p ())
2735 return NULL_RTX;
2736
2737 switch (code)
2738 {
2739 case EQ: case LE: case LT: case LEU: case LTU:
2740 /* We have these compares: */
2741 break;
2742
2743 case NE:
2744 /* This must be reversed. */
2745 code = reverse_condition (code);
2746 cmov_code = EQ;
2747 break;
2748
2749 case GE: case GT: case GEU: case GTU:
2750 /* These must be swapped. */
2751 if (op1 != CONST0_RTX (cmp_mode))
2752 {
2753 code = swap_condition (code);
2754 tem = op0, op0 = op1, op1 = tem;
2755 }
2756 break;
2757
2758 default:
2759 gcc_unreachable ();
2760 }
2761
2762 if (cmp_mode == DImode)
2763 {
2764 if (!reg_or_0_operand (op0, DImode))
2765 op0 = force_reg (DImode, op0);
2766 if (!reg_or_8bit_operand (op1, DImode))
2767 op1 = force_reg (DImode, op1);
2768 }
2769
2770 /* ??? We mark the branch mode to be CCmode to prevent the compare
2771 and cmov from being combined, since the compare insn follows IEEE
2772 rules that the cmov does not. */
2773 if (cmp_mode == DFmode && !local_fast_math)
2774 cmov_mode = CCmode;
2775
2776 tem = gen_reg_rtx (cmp_mode);
2777 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2778 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2779 }
2780
2781 /* Simplify a conditional move of two constants into a setcc with
2782 arithmetic. This is done with a splitter since combine would
2783 just undo the work if done during code generation. It also catches
2784 cases we wouldn't have before cse. */
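/* Two illustrative cases handled below: (cond ? 8 : 0) becomes a setcc
   followed by a left shift of 3, and (cond ? 5 : 1) becomes a setcc
   followed by an s4addq-style multiply-and-add, avoiding the cmov
   entirely.  */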
2785
2786 int
2787 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2788 rtx t_rtx, rtx f_rtx)
2789 {
2790 HOST_WIDE_INT t, f, diff;
2791 enum machine_mode mode;
2792 rtx target, subtarget, tmp;
2793
2794 mode = GET_MODE (dest);
2795 t = INTVAL (t_rtx);
2796 f = INTVAL (f_rtx);
2797 diff = t - f;
2798
2799 if (((code == NE || code == EQ) && diff < 0)
2800 || (code == GE || code == GT))
2801 {
2802 code = reverse_condition (code);
2803 diff = t, t = f, f = diff;
2804 diff = t - f;
2805 }
2806
2807 subtarget = target = dest;
2808 if (mode != DImode)
2809 {
2810 target = gen_lowpart (DImode, dest);
2811 if (can_create_pseudo_p ())
2812 subtarget = gen_reg_rtx (DImode);
2813 else
2814 subtarget = target;
2815 }
2816 /* Below, we must be careful to use copy_rtx on target and subtarget
2817 in intermediate insns, as they may be a subreg rtx, which may not
2818 be shared. */
2819
2820 if (f == 0 && exact_log2 (diff) > 0
2821 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2822 viable over a longer latency cmove. On EV5, the E0 slot is a
2823 scarce resource, and on EV4 shift has the same latency as a cmove. */
2824 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2825 {
2826 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2827 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2828
2829 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2830 GEN_INT (exact_log2 (t)));
2831 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2832 }
2833 else if (f == 0 && t == -1)
2834 {
2835 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2836 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2837
2838 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2839 }
2840 else if (diff == 1 || diff == 4 || diff == 8)
2841 {
2842 rtx add_op;
2843
2844 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2845 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2846
2847 if (diff == 1)
2848 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2849 else
2850 {
2851 add_op = GEN_INT (f);
2852 if (sext_add_operand (add_op, mode))
2853 {
2854 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2855 GEN_INT (diff));
2856 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2857 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2858 }
2859 else
2860 return 0;
2861 }
2862 }
2863 else
2864 return 0;
2865
2866 return 1;
2867 }
2868 \f
2869 /* Look up the X_floating library function name for the
2870 given operation. */
2871
2872 struct GTY(()) xfloating_op
2873 {
2874 const enum rtx_code code;
2875 const char *const GTY((skip)) osf_func;
2876 const char *const GTY((skip)) vms_func;
2877 rtx libcall;
2878 };
2879
2880 static GTY(()) struct xfloating_op xfloating_ops[] =
2881 {
2882 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2883 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2884 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2885 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2886 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2887 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2888 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2889 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2890 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2891 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2892 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2893 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2894 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2895 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2896 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2897 };
2898
2899 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2900 {
2901 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2902 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2903 };
2904
2905 static rtx
2906 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2907 {
2908 struct xfloating_op *ops = xfloating_ops;
2909 long n = ARRAY_SIZE (xfloating_ops);
2910 long i;
2911
2912 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2913
2914 /* How irritating. Nothing to key off for the main table. */
2915 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2916 {
2917 ops = vax_cvt_ops;
2918 n = ARRAY_SIZE (vax_cvt_ops);
2919 }
2920
2921 for (i = 0; i < n; ++i, ++ops)
2922 if (ops->code == code)
2923 {
2924 rtx func = ops->libcall;
2925 if (!func)
2926 {
2927 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2928 ? ops->vms_func : ops->osf_func);
2929 ops->libcall = func;
2930 }
2931 return func;
2932 }
2933
2934 gcc_unreachable ();
2935 }
2936
2937 /* Most X_floating operations take the rounding mode as an argument.
2938 Compute that here. */
2939
2940 static int
2941 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2942 enum alpha_fp_rounding_mode round)
2943 {
2944 int mode;
2945
2946 switch (round)
2947 {
2948 case ALPHA_FPRM_NORM:
2949 mode = 2;
2950 break;
2951 case ALPHA_FPRM_MINF:
2952 mode = 1;
2953 break;
2954 case ALPHA_FPRM_CHOP:
2955 mode = 0;
2956 break;
2957 case ALPHA_FPRM_DYN:
2958 mode = 4;
2959 break;
2960 default:
2961 gcc_unreachable ();
2962
2963 /* XXX For reference, round to +inf is mode = 3. */
2964 }
2965
2966 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2967 mode |= 0x10000;
2968
2969 return mode;
2970 }
2971
2972 /* Emit an X_floating library function call.
2973
2974 Note that these functions do not follow normal calling conventions:
2975 TFmode arguments are passed in two integer registers (as opposed to
2976 indirect); TFmode return values appear in R16+R17.
2977
2978 FUNC is the function to call.
2979 TARGET is where the output belongs.
2980 OPERANDS are the inputs.
2981 NOPERANDS is the count of inputs.
2982 EQUIV is the expression equivalent for the function.
2983 */
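/* As an illustration of the register assignment done below: for a TFmode
   add such as _OtsAddX, the first operand goes in $16/$17, the second in
   $18/$19, the rounding-mode literal in $20, and the TFmode result comes
   back in $16/$17.  */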
2984
2985 static void
2986 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2987 int noperands, rtx equiv)
2988 {
2989 rtx usage = NULL_RTX, tmp, reg;
2990 int regno = 16, i;
2991
2992 start_sequence ();
2993
2994 for (i = 0; i < noperands; ++i)
2995 {
2996 switch (GET_MODE (operands[i]))
2997 {
2998 case TFmode:
2999 reg = gen_rtx_REG (TFmode, regno);
3000 regno += 2;
3001 break;
3002
3003 case DFmode:
3004 reg = gen_rtx_REG (DFmode, regno + 32);
3005 regno += 1;
3006 break;
3007
3008 case VOIDmode:
3009 gcc_assert (CONST_INT_P (operands[i]));
3010 /* FALLTHRU */
3011 case DImode:
3012 reg = gen_rtx_REG (DImode, regno);
3013 regno += 1;
3014 break;
3015
3016 default:
3017 gcc_unreachable ();
3018 }
3019
3020 emit_move_insn (reg, operands[i]);
3021 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3022 }
3023
3024 switch (GET_MODE (target))
3025 {
3026 case TFmode:
3027 reg = gen_rtx_REG (TFmode, 16);
3028 break;
3029 case DFmode:
3030 reg = gen_rtx_REG (DFmode, 32);
3031 break;
3032 case DImode:
3033 reg = gen_rtx_REG (DImode, 0);
3034 break;
3035 default:
3036 gcc_unreachable ();
3037 }
3038
3039 tmp = gen_rtx_MEM (QImode, func);
3040 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3041 const0_rtx, const0_rtx));
3042 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3043 RTL_CONST_CALL_P (tmp) = 1;
3044
3045 tmp = get_insns ();
3046 end_sequence ();
3047
3048 emit_libcall_block (tmp, target, reg, equiv);
3049 }
3050
3051 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3052
3053 void
3054 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3055 {
3056 rtx func;
3057 int mode;
3058 rtx out_operands[3];
3059
3060 func = alpha_lookup_xfloating_lib_func (code);
3061 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3062
3063 out_operands[0] = operands[1];
3064 out_operands[1] = operands[2];
3065 out_operands[2] = GEN_INT (mode);
3066 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3067 gen_rtx_fmt_ee (code, TFmode, operands[1],
3068 operands[2]));
3069 }
3070
3071 /* Emit an X_floating library function call for a comparison. */
3072
3073 static rtx
3074 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3075 {
3076 enum rtx_code cmp_code, res_code;
3077 rtx func, out, operands[2], note;
3078
3079 /* X_floating library comparison functions return
3080 -1 unordered
3081 0 false
3082 1 true
3083 Convert the compare against the raw return value. */
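  /* For instance, UNORDERED is implemented by calling the EQ routine and
     testing for a negative (-1) result, while LT calls the LSS routine
     and tests for a positive result.  */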
3084
3085 cmp_code = *pcode;
3086 switch (cmp_code)
3087 {
3088 case UNORDERED:
3089 cmp_code = EQ;
3090 res_code = LT;
3091 break;
3092 case ORDERED:
3093 cmp_code = EQ;
3094 res_code = GE;
3095 break;
3096 case NE:
3097 res_code = NE;
3098 break;
3099 case EQ:
3100 case LT:
3101 case GT:
3102 case LE:
3103 case GE:
3104 res_code = GT;
3105 break;
3106 default:
3107 gcc_unreachable ();
3108 }
3109 *pcode = res_code;
3110
3111 func = alpha_lookup_xfloating_lib_func (cmp_code);
3112
3113 operands[0] = op0;
3114 operands[1] = op1;
3115 out = gen_reg_rtx (DImode);
3116
3117 /* What's actually returned is -1,0,1, not a proper boolean value,
3118 so use an EXPR_LIST as with a generic libcall instead of a
3119 comparison type expression. */
3120 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3121 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3122 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3123 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3124
3125 return out;
3126 }
3127
3128 /* Emit an X_floating library function call for a conversion. */
3129
3130 void
3131 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3132 {
3133 int noperands = 1, mode;
3134 rtx out_operands[2];
3135 rtx func;
3136 enum rtx_code code = orig_code;
3137
3138 if (code == UNSIGNED_FIX)
3139 code = FIX;
3140
3141 func = alpha_lookup_xfloating_lib_func (code);
3142
3143 out_operands[0] = operands[1];
3144
3145 switch (code)
3146 {
3147 case FIX:
3148 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3149 out_operands[1] = GEN_INT (mode);
3150 noperands = 2;
3151 break;
3152 case FLOAT_TRUNCATE:
3153 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3154 out_operands[1] = GEN_INT (mode);
3155 noperands = 2;
3156 break;
3157 default:
3158 break;
3159 }
3160
3161 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3162 gen_rtx_fmt_e (orig_code,
3163 GET_MODE (operands[0]),
3164 operands[1]));
3165 }
3166
3167 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3168 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3169 guarantee that the sequence
3170 set (OP[0] OP[2])
3171 set (OP[1] OP[3])
3172 is valid. Naturally, output operand ordering is little-endian.
3173 This is used by *movtf_internal and *movti_internal. */
3174
3175 void
3176 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3177 bool fixup_overlap)
3178 {
3179 switch (GET_CODE (operands[1]))
3180 {
3181 case REG:
3182 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3183 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3184 break;
3185
3186 case MEM:
3187 operands[3] = adjust_address (operands[1], DImode, 8);
3188 operands[2] = adjust_address (operands[1], DImode, 0);
3189 break;
3190
3191 case CONST_INT:
3192 case CONST_DOUBLE:
3193 gcc_assert (operands[1] == CONST0_RTX (mode));
3194 operands[2] = operands[3] = const0_rtx;
3195 break;
3196
3197 default:
3198 gcc_unreachable ();
3199 }
3200
3201 switch (GET_CODE (operands[0]))
3202 {
3203 case REG:
3204 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3205 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3206 break;
3207
3208 case MEM:
3209 operands[1] = adjust_address (operands[0], DImode, 8);
3210 operands[0] = adjust_address (operands[0], DImode, 0);
3211 break;
3212
3213 default:
3214 gcc_unreachable ();
3215 }
3216
3217 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3218 {
3219 rtx tmp;
3220 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3221 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3222 }
3223 }
3224
3225 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3226 op2 is a register containing the sign bit, operation is the
3227 logical operation to be performed. */
3228
3229 void
3230 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3231 {
3232 rtx high_bit = operands[2];
3233 rtx scratch;
3234 int move;
3235
3236 alpha_split_tmode_pair (operands, TFmode, false);
3237
3238 /* Detect three flavors of operand overlap. */
3239 move = 1;
3240 if (rtx_equal_p (operands[0], operands[2]))
3241 move = 0;
3242 else if (rtx_equal_p (operands[1], operands[2]))
3243 {
3244 if (rtx_equal_p (operands[0], high_bit))
3245 move = 2;
3246 else
3247 move = -1;
3248 }
3249
3250 if (move < 0)
3251 emit_move_insn (operands[0], operands[2]);
3252
3253 /* ??? If the destination overlaps both source tf and high_bit, then
3254 assume source tf is dead in its entirety and use the other half
3255 for a scratch register. Otherwise "scratch" is just the proper
3256 destination register. */
3257 scratch = operands[move < 2 ? 1 : 3];
3258
3259 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3260
3261 if (move > 0)
3262 {
3263 emit_move_insn (operands[0], operands[2]);
3264 if (move > 1)
3265 emit_move_insn (operands[1], scratch);
3266 }
3267 }
3268 \f
3269 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3270 unaligned data:
3271
3272 unsigned: signed:
3273 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3274 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3275 lda r3,X(r11) lda r3,X+2(r11)
3276 extwl r1,r3,r1 extql r1,r3,r1
3277 extwh r2,r3,r2 extqh r2,r3,r2
3278 or r1,r2,r1 or r1,r2,r1
3279 sra r1,48,r1
3280
3281 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3282 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3283 lda r3,X(r11) lda r3,X(r11)
3284 extll r1,r3,r1 extll r1,r3,r1
3285 extlh r2,r3,r2 extlh r2,r3,r2
3286 or r1,r2,r1 addl r1,r2,r1
3287
3288 quad: ldq_u r1,X(r11)
3289 ldq_u r2,X+7(r11)
3290 lda r3,X(r11)
3291 extql r1,r3,r1
3292 extqh r2,r3,r2
3293 or r1,r2,r1
3294 */
3295
3296 void
3297 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3298 HOST_WIDE_INT ofs, int sign)
3299 {
3300 rtx meml, memh, addr, extl, exth, tmp, mema;
3301 enum machine_mode mode;
3302
3303 if (TARGET_BWX && size == 2)
3304 {
3305 meml = adjust_address (mem, QImode, ofs);
3306 memh = adjust_address (mem, QImode, ofs+1);
3307 if (BYTES_BIG_ENDIAN)
3308 tmp = meml, meml = memh, memh = tmp;
3309 extl = gen_reg_rtx (DImode);
3310 exth = gen_reg_rtx (DImode);
3311 emit_insn (gen_zero_extendqidi2 (extl, meml));
3312 emit_insn (gen_zero_extendqidi2 (exth, memh));
3313 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3314 NULL, 1, OPTAB_LIB_WIDEN);
3315 addr = expand_simple_binop (DImode, IOR, extl, exth,
3316 NULL, 1, OPTAB_LIB_WIDEN);
3317
3318 if (sign && GET_MODE (tgt) != HImode)
3319 {
3320 addr = gen_lowpart (HImode, addr);
3321 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3322 }
3323 else
3324 {
3325 if (GET_MODE (tgt) != DImode)
3326 addr = gen_lowpart (GET_MODE (tgt), addr);
3327 emit_move_insn (tgt, addr);
3328 }
3329 return;
3330 }
3331
3332 meml = gen_reg_rtx (DImode);
3333 memh = gen_reg_rtx (DImode);
3334 addr = gen_reg_rtx (DImode);
3335 extl = gen_reg_rtx (DImode);
3336 exth = gen_reg_rtx (DImode);
3337
3338 mema = XEXP (mem, 0);
3339 if (GET_CODE (mema) == LO_SUM)
3340 mema = force_reg (Pmode, mema);
3341
3342 /* AND addresses cannot be in any alias set, since they may implicitly
3343 alias surrounding code. Ideally we'd have some alias set that
3344 covered all types except those with alignment 8 or higher. */
3345
3346 tmp = change_address (mem, DImode,
3347 gen_rtx_AND (DImode,
3348 plus_constant (mema, ofs),
3349 GEN_INT (-8)));
3350 set_mem_alias_set (tmp, 0);
3351 emit_move_insn (meml, tmp);
3352
3353 tmp = change_address (mem, DImode,
3354 gen_rtx_AND (DImode,
3355 plus_constant (mema, ofs + size - 1),
3356 GEN_INT (-8)));
3357 set_mem_alias_set (tmp, 0);
3358 emit_move_insn (memh, tmp);
3359
3360 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3361 {
3362 emit_move_insn (addr, plus_constant (mema, -1));
3363
3364 emit_insn (gen_extqh_be (extl, meml, addr));
3365 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3366
3367 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3368 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3369 addr, 1, OPTAB_WIDEN);
3370 }
3371 else if (sign && size == 2)
3372 {
3373 emit_move_insn (addr, plus_constant (mema, ofs+2));
3374
3375 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3376 emit_insn (gen_extqh_le (exth, memh, addr));
3377
3378 /* We must use tgt here for the target. The alpha-vms port fails if we use
3379 addr for the target, because addr is marked as a pointer and combine
3380 knows that pointers are always sign-extended 32-bit values. */
3381 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3382 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3383 addr, 1, OPTAB_WIDEN);
3384 }
3385 else
3386 {
3387 if (WORDS_BIG_ENDIAN)
3388 {
3389 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3390 switch ((int) size)
3391 {
3392 case 2:
3393 emit_insn (gen_extwh_be (extl, meml, addr));
3394 mode = HImode;
3395 break;
3396
3397 case 4:
3398 emit_insn (gen_extlh_be (extl, meml, addr));
3399 mode = SImode;
3400 break;
3401
3402 case 8:
3403 emit_insn (gen_extqh_be (extl, meml, addr));
3404 mode = DImode;
3405 break;
3406
3407 default:
3408 gcc_unreachable ();
3409 }
3410 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3411 }
3412 else
3413 {
3414 emit_move_insn (addr, plus_constant (mema, ofs));
3415 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3416 switch ((int) size)
3417 {
3418 case 2:
3419 emit_insn (gen_extwh_le (exth, memh, addr));
3420 mode = HImode;
3421 break;
3422
3423 case 4:
3424 emit_insn (gen_extlh_le (exth, memh, addr));
3425 mode = SImode;
3426 break;
3427
3428 case 8:
3429 emit_insn (gen_extqh_le (exth, memh, addr));
3430 mode = DImode;
3431 break;
3432
3433 default:
3434 gcc_unreachable ();
3435 }
3436 }
3437
3438 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3439 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3440 sign, OPTAB_WIDEN);
3441 }
3442
3443 if (addr != tgt)
3444 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3445 }
3446
3447 /* Similarly, use ins and msk instructions to perform unaligned stores. */
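/* For orientation, the quadword little-endian store emitted below has
   roughly this shape (register numbers illustrative, in the style of the
   extraction sequences above):

   ldq_u r2,X+7(r11)
   ldq_u r1,X(r11)
   lda r3,X(r11)
   insqh r5,r3,r7
   insql r5,r3,r6
   mskqh r2,r3,r2
   mskql r1,r3,r1
   or r2,r7,r2
   or r1,r6,r1
   stq_u r2,X+7(r11)
   stq_u r1,X(r11)  */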
3448
3449 void
3450 alpha_expand_unaligned_store (rtx dst, rtx src,
3451 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3452 {
3453 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3454
3455 if (TARGET_BWX && size == 2)
3456 {
3457 if (src != const0_rtx)
3458 {
3459 dstl = gen_lowpart (QImode, src);
3460 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3461 NULL, 1, OPTAB_LIB_WIDEN);
3462 dsth = gen_lowpart (QImode, dsth);
3463 }
3464 else
3465 dstl = dsth = const0_rtx;
3466
3467 meml = adjust_address (dst, QImode, ofs);
3468 memh = adjust_address (dst, QImode, ofs+1);
3469 if (BYTES_BIG_ENDIAN)
3470 addr = meml, meml = memh, memh = addr;
3471
3472 emit_move_insn (meml, dstl);
3473 emit_move_insn (memh, dsth);
3474 return;
3475 }
3476
3477 dstl = gen_reg_rtx (DImode);
3478 dsth = gen_reg_rtx (DImode);
3479 insl = gen_reg_rtx (DImode);
3480 insh = gen_reg_rtx (DImode);
3481
3482 dsta = XEXP (dst, 0);
3483 if (GET_CODE (dsta) == LO_SUM)
3484 dsta = force_reg (Pmode, dsta);
3485
3486 /* AND addresses cannot be in any alias set, since they may implicitly
3487 alias surrounding code. Ideally we'd have some alias set that
3488 covered all types except those with alignment 8 or higher. */
3489
3490 meml = change_address (dst, DImode,
3491 gen_rtx_AND (DImode,
3492 plus_constant (dsta, ofs),
3493 GEN_INT (-8)));
3494 set_mem_alias_set (meml, 0);
3495
3496 memh = change_address (dst, DImode,
3497 gen_rtx_AND (DImode,
3498 plus_constant (dsta, ofs + size - 1),
3499 GEN_INT (-8)));
3500 set_mem_alias_set (memh, 0);
3501
3502 emit_move_insn (dsth, memh);
3503 emit_move_insn (dstl, meml);
3504 if (WORDS_BIG_ENDIAN)
3505 {
3506 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3507
3508 if (src != const0_rtx)
3509 {
3510 switch ((int) size)
3511 {
3512 case 2:
3513 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3514 break;
3515 case 4:
3516 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3517 break;
3518 case 8:
3519 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3520 break;
3521 }
3522 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3523 GEN_INT (size*8), addr));
3524 }
3525
3526 switch ((int) size)
3527 {
3528 case 2:
3529 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3530 break;
3531 case 4:
3532 {
3533 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3534 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3535 break;
3536 }
3537 case 8:
3538 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3539 break;
3540 }
3541
3542 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3543 }
3544 else
3545 {
3546 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3547
3548 if (src != CONST0_RTX (GET_MODE (src)))
3549 {
3550 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3551 GEN_INT (size*8), addr));
3552
3553 switch ((int) size)
3554 {
3555 case 2:
3556 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3557 break;
3558 case 4:
3559 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3560 break;
3561 case 8:
3562 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3563 break;
3564 }
3565 }
3566
3567 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3568
3569 switch ((int) size)
3570 {
3571 case 2:
3572 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3573 break;
3574 case 4:
3575 {
3576 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3577 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3578 break;
3579 }
3580 case 8:
3581 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3582 break;
3583 }
3584 }
3585
3586 if (src != CONST0_RTX (GET_MODE (src)))
3587 {
3588 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3589 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3590 }
3591
3592 if (WORDS_BIG_ENDIAN)
3593 {
3594 emit_move_insn (meml, dstl);
3595 emit_move_insn (memh, dsth);
3596 }
3597 else
3598 {
3599 /* Must store high before low for the degenerate aligned case. */
3600 emit_move_insn (memh, dsth);
3601 emit_move_insn (meml, dstl);
3602 }
3603 }
3604
3605 /* The block move code tries to maximize speed by separating loads and
3606 stores at the expense of register pressure: we load all of the data
3607 before we store it back out. There are two secondary effects worth
3608 mentioning: it speeds copying to/from aligned and unaligned
3609 buffers, and it makes the code significantly easier to write. */
3610
3611 #define MAX_MOVE_WORDS 8
3612
3613 /* Load an integral number of consecutive unaligned quadwords. */
3614
3615 static void
3616 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3617 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3618 {
3619 rtx const im8 = GEN_INT (-8);
3620 rtx const i64 = GEN_INT (64);
3621 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3622 rtx sreg, areg, tmp, smema;
3623 HOST_WIDE_INT i;
3624
3625 smema = XEXP (smem, 0);
3626 if (GET_CODE (smema) == LO_SUM)
3627 smema = force_reg (Pmode, smema);
3628
3629 /* Generate all the tmp registers we need. */
3630 for (i = 0; i < words; ++i)
3631 {
3632 data_regs[i] = out_regs[i];
3633 ext_tmps[i] = gen_reg_rtx (DImode);
3634 }
3635 data_regs[words] = gen_reg_rtx (DImode);
3636
3637 if (ofs != 0)
3638 smem = adjust_address (smem, GET_MODE (smem), ofs);
3639
3640 /* Load up all of the source data. */
3641 for (i = 0; i < words; ++i)
3642 {
3643 tmp = change_address (smem, DImode,
3644 gen_rtx_AND (DImode,
3645 plus_constant (smema, 8*i),
3646 im8));
3647 set_mem_alias_set (tmp, 0);
3648 emit_move_insn (data_regs[i], tmp);
3649 }
3650
3651 tmp = change_address (smem, DImode,
3652 gen_rtx_AND (DImode,
3653 plus_constant (smema, 8*words - 1),
3654 im8));
3655 set_mem_alias_set (tmp, 0);
3656 emit_move_insn (data_regs[words], tmp);
3657
3658 /* Extract the half-word fragments. Unfortunately DEC decided to make
3659 extxh with offset zero a noop instead of zeroing the register, so
3660 we must take care of that edge condition ourselves with cmov. */
3661
3662 sreg = copy_addr_to_reg (smema);
3663 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3664 1, OPTAB_WIDEN);
3665 if (WORDS_BIG_ENDIAN)
3666 emit_move_insn (sreg, plus_constant (sreg, 7));
3667 for (i = 0; i < words; ++i)
3668 {
3669 if (WORDS_BIG_ENDIAN)
3670 {
3671 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3672 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3673 }
3674 else
3675 {
3676 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3677 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3678 }
3679 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3680 gen_rtx_IF_THEN_ELSE (DImode,
3681 gen_rtx_EQ (DImode, areg,
3682 const0_rtx),
3683 const0_rtx, ext_tmps[i])));
3684 }
3685
3686 /* Merge the half-words into whole words. */
3687 for (i = 0; i < words; ++i)
3688 {
3689 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3690 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3691 }
3692 }
3693
3694 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3695 may be NULL to store zeros. */
3696
3697 static void
3698 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3699 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3700 {
3701 rtx const im8 = GEN_INT (-8);
3702 rtx const i64 = GEN_INT (64);
3703 rtx ins_tmps[MAX_MOVE_WORDS];
3704 rtx st_tmp_1, st_tmp_2, dreg;
3705 rtx st_addr_1, st_addr_2, dmema;
3706 HOST_WIDE_INT i;
3707
3708 dmema = XEXP (dmem, 0);
3709 if (GET_CODE (dmema) == LO_SUM)
3710 dmema = force_reg (Pmode, dmema);
3711
3712 /* Generate all the tmp registers we need. */
3713 if (data_regs != NULL)
3714 for (i = 0; i < words; ++i)
3715 ins_tmps[i] = gen_reg_rtx(DImode);
3716 st_tmp_1 = gen_reg_rtx(DImode);
3717 st_tmp_2 = gen_reg_rtx(DImode);
3718
3719 if (ofs != 0)
3720 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3721
3722 st_addr_2 = change_address (dmem, DImode,
3723 gen_rtx_AND (DImode,
3724 plus_constant (dmema, words*8 - 1),
3725 im8));
3726 set_mem_alias_set (st_addr_2, 0);
3727
3728 st_addr_1 = change_address (dmem, DImode,
3729 gen_rtx_AND (DImode, dmema, im8));
3730 set_mem_alias_set (st_addr_1, 0);
3731
3732 /* Load up the destination end bits. */
3733 emit_move_insn (st_tmp_2, st_addr_2);
3734 emit_move_insn (st_tmp_1, st_addr_1);
3735
3736 /* Shift the input data into place. */
3737 dreg = copy_addr_to_reg (dmema);
3738 if (WORDS_BIG_ENDIAN)
3739 emit_move_insn (dreg, plus_constant (dreg, 7));
3740 if (data_regs != NULL)
3741 {
3742 for (i = words-1; i >= 0; --i)
3743 {
3744 if (WORDS_BIG_ENDIAN)
3745 {
3746 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3747 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3748 }
3749 else
3750 {
3751 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3752 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3753 }
3754 }
3755 for (i = words-1; i > 0; --i)
3756 {
3757 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3758 ins_tmps[i-1], ins_tmps[i-1], 1,
3759 OPTAB_WIDEN);
3760 }
3761 }
3762
3763 /* Split and merge the ends with the destination data. */
3764 if (WORDS_BIG_ENDIAN)
3765 {
3766 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3767 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3768 }
3769 else
3770 {
3771 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3772 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3773 }
3774
3775 if (data_regs != NULL)
3776 {
3777 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3778 st_tmp_2, 1, OPTAB_WIDEN);
3779 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3780 st_tmp_1, 1, OPTAB_WIDEN);
3781 }
3782
3783 /* Store it all. */
3784 if (WORDS_BIG_ENDIAN)
3785 emit_move_insn (st_addr_1, st_tmp_1);
3786 else
3787 emit_move_insn (st_addr_2, st_tmp_2);
3788 for (i = words-1; i > 0; --i)
3789 {
3790 rtx tmp = change_address (dmem, DImode,
3791 gen_rtx_AND (DImode,
3792 plus_constant(dmema,
3793 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3794 im8));
3795 set_mem_alias_set (tmp, 0);
3796 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3797 }
3798 if (WORDS_BIG_ENDIAN)
3799 emit_move_insn (st_addr_2, st_tmp_2);
3800 else
3801 emit_move_insn (st_addr_1, st_tmp_1);
3802 }
3803
3804
3805 /* Expand string/block move operations.
3806
3807 operands[0] is the pointer to the destination.
3808 operands[1] is the pointer to the source.
3809 operands[2] is the number of bytes to move.
3810 operands[3] is the alignment. */
3811
3812 int
3813 alpha_expand_block_move (rtx operands[])
3814 {
3815 rtx bytes_rtx = operands[2];
3816 rtx align_rtx = operands[3];
3817 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3818 HOST_WIDE_INT bytes = orig_bytes;
3819 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3820 HOST_WIDE_INT dst_align = src_align;
3821 rtx orig_src = operands[1];
3822 rtx orig_dst = operands[0];
3823 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3824 rtx tmp;
3825 unsigned int i, words, ofs, nregs = 0;
3826
3827 if (orig_bytes <= 0)
3828 return 1;
3829 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3830 return 0;
3831
3832 /* Look for additional alignment information from recorded register info. */
3833
3834 tmp = XEXP (orig_src, 0);
3835 if (REG_P (tmp))
3836 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3837 else if (GET_CODE (tmp) == PLUS
3838 && REG_P (XEXP (tmp, 0))
3839 && CONST_INT_P (XEXP (tmp, 1)))
3840 {
3841 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3842 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3843
3844 if (a > src_align)
3845 {
3846 if (a >= 64 && c % 8 == 0)
3847 src_align = 64;
3848 else if (a >= 32 && c % 4 == 0)
3849 src_align = 32;
3850 else if (a >= 16 && c % 2 == 0)
3851 src_align = 16;
3852 }
3853 }
3854
3855 tmp = XEXP (orig_dst, 0);
3856 if (REG_P (tmp))
3857 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3858 else if (GET_CODE (tmp) == PLUS
3859 && REG_P (XEXP (tmp, 0))
3860 && CONST_INT_P (XEXP (tmp, 1)))
3861 {
3862 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3863 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3864
3865 if (a > dst_align)
3866 {
3867 if (a >= 64 && c % 8 == 0)
3868 dst_align = 64;
3869 else if (a >= 32 && c % 4 == 0)
3870 dst_align = 32;
3871 else if (a >= 16 && c % 2 == 0)
3872 dst_align = 16;
3873 }
3874 }
3875
3876 ofs = 0;
3877 if (src_align >= 64 && bytes >= 8)
3878 {
3879 words = bytes / 8;
3880
3881 for (i = 0; i < words; ++i)
3882 data_regs[nregs + i] = gen_reg_rtx (DImode);
3883
3884 for (i = 0; i < words; ++i)
3885 emit_move_insn (data_regs[nregs + i],
3886 adjust_address (orig_src, DImode, ofs + i * 8));
3887
3888 nregs += words;
3889 bytes -= words * 8;
3890 ofs += words * 8;
3891 }
3892
3893 if (src_align >= 32 && bytes >= 4)
3894 {
3895 words = bytes / 4;
3896
3897 for (i = 0; i < words; ++i)
3898 data_regs[nregs + i] = gen_reg_rtx (SImode);
3899
3900 for (i = 0; i < words; ++i)
3901 emit_move_insn (data_regs[nregs + i],
3902 adjust_address (orig_src, SImode, ofs + i * 4));
3903
3904 nregs += words;
3905 bytes -= words * 4;
3906 ofs += words * 4;
3907 }
3908
3909 if (bytes >= 8)
3910 {
3911 words = bytes / 8;
3912
3913 for (i = 0; i < words+1; ++i)
3914 data_regs[nregs + i] = gen_reg_rtx (DImode);
3915
3916 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3917 words, ofs);
3918
3919 nregs += words;
3920 bytes -= words * 8;
3921 ofs += words * 8;
3922 }
3923
3924 if (! TARGET_BWX && bytes >= 4)
3925 {
3926 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3927 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3928 bytes -= 4;
3929 ofs += 4;
3930 }
3931
3932 if (bytes >= 2)
3933 {
3934 if (src_align >= 16)
3935 {
3936 do {
3937 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3938 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3939 bytes -= 2;
3940 ofs += 2;
3941 } while (bytes >= 2);
3942 }
3943 else if (! TARGET_BWX)
3944 {
3945 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3946 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3947 bytes -= 2;
3948 ofs += 2;
3949 }
3950 }
3951
3952 while (bytes > 0)
3953 {
3954 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3955 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3956 bytes -= 1;
3957 ofs += 1;
3958 }
3959
3960 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3961
3962 /* Now save it back out again. */
3963
3964 i = 0, ofs = 0;
3965
3966 /* Write out the data in whatever chunks reading the source allowed. */
3967 if (dst_align >= 64)
3968 {
3969 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 {
3971 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3972 data_regs[i]);
3973 ofs += 8;
3974 i++;
3975 }
3976 }
3977
3978 if (dst_align >= 32)
3979 {
3980 /* If the source has remaining DImode regs, write them out in
3981 two pieces. */
3982 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3983 {
3984 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3985 NULL_RTX, 1, OPTAB_WIDEN);
3986
3987 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3988 gen_lowpart (SImode, data_regs[i]));
3989 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3990 gen_lowpart (SImode, tmp));
3991 ofs += 8;
3992 i++;
3993 }
3994
3995 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3996 {
3997 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3998 data_regs[i]);
3999 ofs += 4;
4000 i++;
4001 }
4002 }
4003
4004 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4005 {
4006 /* Write out a remaining block of words using unaligned methods. */
4007
4008 for (words = 1; i + words < nregs; words++)
4009 if (GET_MODE (data_regs[i + words]) != DImode)
4010 break;
4011
4012 if (words == 1)
4013 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4014 else
4015 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4016 words, ofs);
4017
4018 i += words;
4019 ofs += words * 8;
4020 }
4021
4022 /* Due to the above, this won't be aligned. */
4023 /* ??? If we have more than one of these, consider constructing full
4024 words in registers and using alpha_expand_unaligned_store_words. */
4025 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4026 {
4027 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4028 ofs += 4;
4029 i++;
4030 }
4031
4032 if (dst_align >= 16)
4033 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4034 {
4035 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4036 i++;
4037 ofs += 2;
4038 }
4039 else
4040 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4041 {
4042 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4043 i++;
4044 ofs += 2;
4045 }
4046
4047 /* The remainder must be byte copies. */
4048 while (i < nregs)
4049 {
4050 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4051 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4052 i++;
4053 ofs += 1;
4054 }
4055
4056 return 1;
4057 }
4058
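/* Expand string/block clear operations.

   operands[0] is the destination memory reference.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */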
4059 int
4060 alpha_expand_block_clear (rtx operands[])
4061 {
4062 rtx bytes_rtx = operands[1];
4063 rtx align_rtx = operands[3];
4064 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4065 HOST_WIDE_INT bytes = orig_bytes;
4066 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4067 HOST_WIDE_INT alignofs = 0;
4068 rtx orig_dst = operands[0];
4069 rtx tmp;
4070 int i, words, ofs = 0;
4071
4072 if (orig_bytes <= 0)
4073 return 1;
4074 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4075 return 0;
4076
4077 /* Look for stricter alignment. */
4078 tmp = XEXP (orig_dst, 0);
4079 if (REG_P (tmp))
4080 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4081 else if (GET_CODE (tmp) == PLUS
4082 && REG_P (XEXP (tmp, 0))
4083 && CONST_INT_P (XEXP (tmp, 1)))
4084 {
4085 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4086 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4087
4088 if (a > align)
4089 {
4090 if (a >= 64)
4091 align = a, alignofs = 8 - c % 8;
4092 else if (a >= 32)
4093 align = a, alignofs = 4 - c % 4;
4094 else if (a >= 16)
4095 align = a, alignofs = 2 - c % 2;
4096 }
4097 }
4098
4099 /* Handle an unaligned prefix first. */
4100
4101 if (alignofs > 0)
4102 {
4103 #if HOST_BITS_PER_WIDE_INT >= 64
4104 /* Given that alignofs is bounded by align, the only time BWX could
4105 generate three stores is for a 7 byte fill. Prefer two individual
4106 stores over a load/mask/store sequence. */
4107 if ((!TARGET_BWX || alignofs == 7)
4108 && align >= 32
4109 && !(alignofs == 4 && bytes >= 4))
4110 {
4111 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4112 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4113 rtx mem, tmp;
4114 HOST_WIDE_INT mask;
4115
4116 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4117 set_mem_alias_set (mem, 0);
4118
4119 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4120 if (bytes < alignofs)
4121 {
4122 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4123 ofs += bytes;
4124 bytes = 0;
4125 }
4126 else
4127 {
4128 bytes -= alignofs;
4129 ofs += alignofs;
4130 }
4131 alignofs = 0;
4132
4133 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4134 NULL_RTX, 1, OPTAB_WIDEN);
4135
4136 emit_move_insn (mem, tmp);
4137 }
4138 #endif
4139
4140 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4141 {
4142 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4143 bytes -= 1;
4144 ofs += 1;
4145 alignofs -= 1;
4146 }
4147 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4148 {
4149 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4150 bytes -= 2;
4151 ofs += 2;
4152 alignofs -= 2;
4153 }
4154 if (alignofs == 4 && bytes >= 4)
4155 {
4156 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4157 bytes -= 4;
4158 ofs += 4;
4159 alignofs = 0;
4160 }
4161
4162 /* If we've not used the extra lead alignment information by now,
4163 we won't be able to. Downgrade align to match what's left over. */
4164 if (alignofs > 0)
4165 {
4166 alignofs = alignofs & -alignofs;
4167 align = MIN (align, alignofs * BITS_PER_UNIT);
4168 }
4169 }
4170
4171 /* Handle a block of contiguous long-words. */
4172
4173 if (align >= 64 && bytes >= 8)
4174 {
4175 words = bytes / 8;
4176
4177 for (i = 0; i < words; ++i)
4178 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4179 const0_rtx);
4180
4181 bytes -= words * 8;
4182 ofs += words * 8;
4183 }
4184
4185 /* If the block is large and appropriately aligned, emit a single
4186 store followed by a sequence of stq_u insns. */
4187
4188 if (align >= 32 && bytes > 16)
4189 {
4190 rtx orig_dsta;
4191
4192 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4193 bytes -= 4;
4194 ofs += 4;
4195
4196 orig_dsta = XEXP (orig_dst, 0);
4197 if (GET_CODE (orig_dsta) == LO_SUM)
4198 orig_dsta = force_reg (Pmode, orig_dsta);
4199
4200 words = bytes / 8;
4201 for (i = 0; i < words; ++i)
4202 {
4203 rtx mem
4204 = change_address (orig_dst, DImode,
4205 gen_rtx_AND (DImode,
4206 plus_constant (orig_dsta, ofs + i*8),
4207 GEN_INT (-8)));
4208 set_mem_alias_set (mem, 0);
4209 emit_move_insn (mem, const0_rtx);
4210 }
4211
4212 /* Depending on the alignment, the first stq_u may have overlapped
4213 with the initial stl, which means that the last stq_u didn't
4214 write as much as it would appear. Leave those questionable bytes
4215 unaccounted for. */
4216 bytes -= words * 8 - 4;
4217 ofs += words * 8 - 4;
4218 }
4219
4220 /* Handle a smaller block of aligned words. */
4221
4222 if ((align >= 64 && bytes == 4)
4223 || (align == 32 && bytes >= 4))
4224 {
4225 words = bytes / 4;
4226
4227 for (i = 0; i < words; ++i)
4228 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4229 const0_rtx);
4230
4231 bytes -= words * 4;
4232 ofs += words * 4;
4233 }
4234
4235   /* An unaligned block uses stq_u stores for as many words as possible.  */
4236
4237 if (bytes >= 8)
4238 {
4239 words = bytes / 8;
4240
4241 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4242
4243 bytes -= words * 8;
4244 ofs += words * 8;
4245 }
4246
4247 /* Next clean up any trailing pieces. */
4248
4249 #if HOST_BITS_PER_WIDE_INT >= 64
4250 /* Count the number of bits in BYTES for which aligned stores could
4251 be emitted. */
4252 words = 0;
4253 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4254 if (bytes & i)
4255 words += 1;
4256
4257 /* If we have appropriate alignment (and it wouldn't take too many
4258 instructions otherwise), mask out the bytes we need. */
4259 if (TARGET_BWX ? words > 2 : bytes > 0)
4260 {
4261 if (align >= 64)
4262 {
4263 rtx mem, tmp;
4264 HOST_WIDE_INT mask;
4265
4266 mem = adjust_address (orig_dst, DImode, ofs);
4267 set_mem_alias_set (mem, 0);
4268
4269 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4270
4271 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4272 NULL_RTX, 1, OPTAB_WIDEN);
4273
4274 emit_move_insn (mem, tmp);
4275 return 1;
4276 }
4277 else if (align >= 32 && bytes < 4)
4278 {
4279 rtx mem, tmp;
4280 HOST_WIDE_INT mask;
4281
4282 mem = adjust_address (orig_dst, SImode, ofs);
4283 set_mem_alias_set (mem, 0);
4284
4285 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4286
4287 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4288 NULL_RTX, 1, OPTAB_WIDEN);
4289
4290 emit_move_insn (mem, tmp);
4291 return 1;
4292 }
4293 }
4294 #endif
4295
4296 if (!TARGET_BWX && bytes >= 4)
4297 {
4298 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4299 bytes -= 4;
4300 ofs += 4;
4301 }
4302
4303 if (bytes >= 2)
4304 {
4305 if (align >= 16)
4306 {
4307 do {
4308 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4309 const0_rtx);
4310 bytes -= 2;
4311 ofs += 2;
4312 } while (bytes >= 2);
4313 }
4314 else if (! TARGET_BWX)
4315 {
4316 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4317 bytes -= 2;
4318 ofs += 2;
4319 }
4320 }
4321
4322 while (bytes > 0)
4323 {
4324 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4325 bytes -= 1;
4326 ofs += 1;
4327 }
4328
4329 return 1;
4330 }
4331
4332 /* Returns a mask so that zap(x, value) == x & mask. */
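/* For example (editorial note): value == 0x05 selects bytes 0 and 2 for
   zapping, so the mask built below is 0xffffffffff00ff00 and
   zap (x, 0x05) == x & 0xffffffffff00ff00.  */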
4333
4334 rtx
4335 alpha_expand_zap_mask (HOST_WIDE_INT value)
4336 {
4337 rtx result;
4338 int i;
4339
4340 if (HOST_BITS_PER_WIDE_INT >= 64)
4341 {
4342 HOST_WIDE_INT mask = 0;
4343
4344 for (i = 7; i >= 0; --i)
4345 {
4346 mask <<= 8;
4347 if (!((value >> i) & 1))
4348 mask |= 0xff;
4349 }
4350
4351 result = gen_int_mode (mask, DImode);
4352 }
4353 else
4354 {
4355 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4356
4357 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4358
4359 for (i = 7; i >= 4; --i)
4360 {
4361 mask_hi <<= 8;
4362 if (!((value >> i) & 1))
4363 mask_hi |= 0xff;
4364 }
4365
4366 for (i = 3; i >= 0; --i)
4367 {
4368 mask_lo <<= 8;
4369 if (!((value >> i) & 1))
4370 mask_lo |= 0xff;
4371 }
4372
4373 result = immed_double_const (mask_lo, mask_hi, DImode);
4374 }
4375
4376 return result;
4377 }
4378
4379 void
4380 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4381 enum machine_mode mode,
4382 rtx op0, rtx op1, rtx op2)
4383 {
4384 op0 = gen_lowpart (mode, op0);
4385
4386 if (op1 == const0_rtx)
4387 op1 = CONST0_RTX (mode);
4388 else
4389 op1 = gen_lowpart (mode, op1);
4390
4391 if (op2 == const0_rtx)
4392 op2 = CONST0_RTX (mode);
4393 else
4394 op2 = gen_lowpart (mode, op2);
4395
4396 emit_insn ((*gen) (op0, op1, op2));
4397 }
4398
4399 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4400 COND is true. Mark the jump as unlikely to be taken. */
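/* Editorial note: with REG_BR_PROB_BASE at its usual value of 10000,
   the probability attached below is 10000/100 - 1 = 99, i.e. the jump
   is predicted taken roughly 1% of the time.  */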
4401
4402 static void
4403 emit_unlikely_jump (rtx cond, rtx label)
4404 {
4405 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4406 rtx x;
4407
4408 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4409 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4410 add_reg_note (x, REG_BR_PROB, very_unlikely);
4411 }
4412
4413 /* A subroutine of the atomic operation splitters. Emit a load-locked
4414 instruction in MODE. */
4415
4416 static void
4417 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4418 {
4419 rtx (*fn) (rtx, rtx) = NULL;
4420 if (mode == SImode)
4421 fn = gen_load_locked_si;
4422 else if (mode == DImode)
4423 fn = gen_load_locked_di;
4424 emit_insn (fn (reg, mem));
4425 }
4426
4427 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4428 instruction in MODE. */
4429
4430 static void
4431 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4432 {
4433 rtx (*fn) (rtx, rtx, rtx) = NULL;
4434 if (mode == SImode)
4435 fn = gen_store_conditional_si;
4436 else if (mode == DImode)
4437 fn = gen_store_conditional_di;
4438 emit_insn (fn (res, mem, val));
4439 }
4440
4441 /* A subroutine of the atomic operation splitters. Emit an insxl
4442 instruction in MODE. */
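/* Editorial note: on a little-endian target insbl/inswl shift the low
   byte or word of OP1 into the byte lane selected by the low three bits
   of the address OP2, so the result can later be OR'd into a quadword
   fetched with ldq_l.  */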
4443
4444 static rtx
4445 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4446 {
4447 rtx ret = gen_reg_rtx (DImode);
4448 rtx (*fn) (rtx, rtx, rtx);
4449
4450 if (WORDS_BIG_ENDIAN)
4451 {
4452 if (mode == QImode)
4453 fn = gen_insbl_be;
4454 else
4455 fn = gen_inswl_be;
4456 }
4457 else
4458 {
4459 if (mode == QImode)
4460 fn = gen_insbl_le;
4461 else
4462 fn = gen_inswl_le;
4463 }
4464 /* The insbl and inswl patterns require a register operand. */
4465 op1 = force_reg (mode, op1);
4466 emit_insn (fn (ret, op1, op2));
4467
4468 return ret;
4469 }
4470
4471 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4472 to perform. MEM is the memory on which to operate. VAL is the second
4473 operand of the binary operator. BEFORE and AFTER are optional locations to
4474    return the value of MEM either before or after the operation.  SCRATCH is
4475 a scratch register. */
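/* Rough editorial sketch (register names are placeholders): for
   CODE == PLUS on a DImode MEM with no BEFORE/AFTER values requested,
   the split below amounts to

	mb
     1:	ldq_l	scratch,0(mem)
	addq	scratch,val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb
*/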
4476
4477 void
4478 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4479 rtx before, rtx after, rtx scratch)
4480 {
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4483
4484 emit_insn (gen_memory_barrier ());
4485
4486 label = gen_label_rtx ();
4487 emit_label (label);
4488 label = gen_rtx_LABEL_REF (DImode, label);
4489
4490 if (before == NULL)
4491 before = scratch;
4492 emit_load_locked (mode, before, mem);
4493
4494 if (code == NOT)
4495 {
4496 x = gen_rtx_AND (mode, before, val);
4497 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4498
4499 x = gen_rtx_NOT (mode, val);
4500 }
4501 else
4502 x = gen_rtx_fmt_ee (code, mode, before, val);
4503 if (after)
4504 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4505 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4506
4507 emit_store_conditional (mode, cond, mem, scratch);
4508
4509 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4510 emit_unlikely_jump (x, label);
4511
4512 emit_insn (gen_memory_barrier ());
4513 }
4514
4515 /* Expand a compare and swap operation. */
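/* Rough editorial sketch of the emitted sequence for DImode (register
   names are placeholders; SImode uses ldl_l/stl_c, and COND is the low
   part of SCRATCH):

	mb
     1:	ldq_l	retval,0(mem)
	cmpeq	retval,oldval,cond
	beq	cond,2f
	mov	newval,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb
     2:
*/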
4516
4517 void
4518 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4519 rtx scratch)
4520 {
4521 enum machine_mode mode = GET_MODE (mem);
4522 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4523
4524 emit_insn (gen_memory_barrier ());
4525
4526 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4527 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4528 emit_label (XEXP (label1, 0));
4529
4530 emit_load_locked (mode, retval, mem);
4531
4532 x = gen_lowpart (DImode, retval);
4533 if (oldval == const0_rtx)
4534 x = gen_rtx_NE (DImode, x, const0_rtx);
4535 else
4536 {
4537 x = gen_rtx_EQ (DImode, x, oldval);
4538 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4539 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4540 }
4541 emit_unlikely_jump (x, label2);
4542
4543 emit_move_insn (scratch, newval);
4544 emit_store_conditional (mode, cond, mem, scratch);
4545
4546 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4547 emit_unlikely_jump (x, label1);
4548
4549 emit_insn (gen_memory_barrier ());
4550 emit_label (XEXP (label2, 0));
4551 }
4552
4553 void
4554 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4555 {
4556 enum machine_mode mode = GET_MODE (mem);
4557 rtx addr, align, wdst;
4558 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4559
4560 addr = force_reg (DImode, XEXP (mem, 0));
4561 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4562 NULL_RTX, 1, OPTAB_DIRECT);
4563
4564 oldval = convert_modes (DImode, mode, oldval, 1);
4565 newval = emit_insxl (mode, newval, addr);
4566
4567 wdst = gen_reg_rtx (DImode);
4568 if (mode == QImode)
4569 fn5 = gen_sync_compare_and_swapqi_1;
4570 else
4571 fn5 = gen_sync_compare_and_swaphi_1;
4572 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4573
4574 emit_move_insn (dst, gen_lowpart (mode, wdst));
4575 }
4576
4577 void
4578 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4579 rtx oldval, rtx newval, rtx align,
4580 rtx scratch, rtx cond)
4581 {
4582 rtx label1, label2, mem, width, mask, x;
4583
4584 mem = gen_rtx_MEM (DImode, align);
4585 MEM_VOLATILE_P (mem) = 1;
4586
4587 emit_insn (gen_memory_barrier ());
4588 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4589 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4590 emit_label (XEXP (label1, 0));
4591
4592 emit_load_locked (DImode, scratch, mem);
4593
4594 width = GEN_INT (GET_MODE_BITSIZE (mode));
4595 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4596 if (WORDS_BIG_ENDIAN)
4597 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4598 else
4599 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4600
4601 if (oldval == const0_rtx)
4602 x = gen_rtx_NE (DImode, dest, const0_rtx);
4603 else
4604 {
4605 x = gen_rtx_EQ (DImode, dest, oldval);
4606 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4607 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4608 }
4609 emit_unlikely_jump (x, label2);
4610
4611 if (WORDS_BIG_ENDIAN)
4612 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4613 else
4614 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4615 emit_insn (gen_iordi3 (scratch, scratch, newval));
4616
4617 emit_store_conditional (DImode, scratch, mem, scratch);
4618
4619 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4620 emit_unlikely_jump (x, label1);
4621
4622 emit_insn (gen_memory_barrier ());
4623 emit_label (XEXP (label2, 0));
4624 }
4625
4626 /* Expand an atomic exchange operation. */
4627
4628 void
4629 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4630 {
4631 enum machine_mode mode = GET_MODE (mem);
4632 rtx label, x, cond = gen_lowpart (DImode, scratch);
4633
4634 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4635 emit_label (XEXP (label, 0));
4636
4637 emit_load_locked (mode, retval, mem);
4638 emit_move_insn (scratch, val);
4639 emit_store_conditional (mode, cond, mem, scratch);
4640
4641 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4642 emit_unlikely_jump (x, label);
4643
4644 emit_insn (gen_memory_barrier ());
4645 }
4646
4647 void
4648 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4649 {
4650 enum machine_mode mode = GET_MODE (mem);
4651 rtx addr, align, wdst;
4652 rtx (*fn4) (rtx, rtx, rtx, rtx);
4653
4654 /* Force the address into a register. */
4655 addr = force_reg (DImode, XEXP (mem, 0));
4656
4657 /* Align it to a multiple of 8. */
4658 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4659 NULL_RTX, 1, OPTAB_DIRECT);
4660
4661 /* Insert val into the correct byte location within the word. */
4662 val = emit_insxl (mode, val, addr);
4663
4664 wdst = gen_reg_rtx (DImode);
4665 if (mode == QImode)
4666 fn4 = gen_sync_lock_test_and_setqi_1;
4667 else
4668 fn4 = gen_sync_lock_test_and_sethi_1;
4669 emit_insn (fn4 (wdst, addr, val, align));
4670
4671 emit_move_insn (dst, gen_lowpart (mode, wdst));
4672 }
4673
4674 void
4675 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4676 rtx val, rtx align, rtx scratch)
4677 {
4678 rtx label, mem, width, mask, x;
4679
4680 mem = gen_rtx_MEM (DImode, align);
4681 MEM_VOLATILE_P (mem) = 1;
4682
4683 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4684 emit_label (XEXP (label, 0));
4685
4686 emit_load_locked (DImode, scratch, mem);
4687
4688 width = GEN_INT (GET_MODE_BITSIZE (mode));
4689 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4690 if (WORDS_BIG_ENDIAN)
4691 {
4692 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4693 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4694 }
4695 else
4696 {
4697 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4698 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4699 }
4700 emit_insn (gen_iordi3 (scratch, scratch, val));
4701
4702 emit_store_conditional (DImode, scratch, mem, scratch);
4703
4704 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4705 emit_unlikely_jump (x, label);
4706
4707 emit_insn (gen_memory_barrier ());
4708 }
4709 \f
4710 /* Adjust the cost of a scheduling dependency. Return the new cost of
4711    a dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */
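/* Editorial example: with the default alpha_memory_latency of 3, a true
   dependence on a load (TYPE_ILD, TYPE_FLD or TYPE_LDSYM) is charged
   3 - 1 = 2 extra cycles below.  */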
4712
4713 static int
4714 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4715 {
4716 enum attr_type insn_type, dep_insn_type;
4717
4718 /* If the dependence is an anti-dependence, there is no cost. For an
4719 output dependence, there is sometimes a cost, but it doesn't seem
4720 worth handling those few cases. */
4721 if (REG_NOTE_KIND (link) != 0)
4722 return cost;
4723
4724 /* If we can't recognize the insns, we can't really do anything. */
4725 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4726 return cost;
4727
4728 insn_type = get_attr_type (insn);
4729 dep_insn_type = get_attr_type (dep_insn);
4730
4731 /* Bring in the user-defined memory latency. */
4732 if (dep_insn_type == TYPE_ILD
4733 || dep_insn_type == TYPE_FLD
4734 || dep_insn_type == TYPE_LDSYM)
4735 cost += alpha_memory_latency-1;
4736
4737 /* Everything else handled in DFA bypasses now. */
4738
4739 return cost;
4740 }
4741
4742 /* The number of instructions that can be issued per cycle. */
4743
4744 static int
4745 alpha_issue_rate (void)
4746 {
4747 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4748 }
4749
4750 /* How many alternative schedules to try. This should be as wide as the
4751 scheduling freedom in the DFA, but no wider. Making this value too
4752    large results in extra work for the scheduler.
4753
4754 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4755 alternative schedules. For EV5, we can choose between E0/E1 and
4756 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4757
4758 static int
4759 alpha_multipass_dfa_lookahead (void)
4760 {
4761 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4762 }
4763 \f
4764 /* Machine-specific function data. */
4765
4766 struct GTY(()) machine_function
4767 {
4768 /* For unicosmk. */
4769 /* List of call information words for calls from this function. */
4770 struct rtx_def *first_ciw;
4771 struct rtx_def *last_ciw;
4772 int ciw_count;
4773
4774 /* List of deferred case vectors. */
4775 struct rtx_def *addr_list;
4776
4777 /* For OSF. */
4778 const char *some_ld_name;
4779
4780 /* For TARGET_LD_BUGGY_LDGP. */
4781 struct rtx_def *gp_save_rtx;
4782 };
4783
4784 /* How to allocate a 'struct machine_function'. */
4785
4786 static struct machine_function *
4787 alpha_init_machine_status (void)
4788 {
4789 return ((struct machine_function *)
4790 ggc_alloc_cleared (sizeof (struct machine_function)));
4791 }
4792
4793 /* Functions to save and restore alpha_return_addr_rtx. */
4794
4795 /* Start the ball rolling with RETURN_ADDR_RTX. */
4796
4797 rtx
4798 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4799 {
4800 if (count != 0)
4801 return const0_rtx;
4802
4803 return get_hard_reg_initial_val (Pmode, REG_RA);
4804 }
4805
4806 /* Return or create a memory slot containing the gp value for the current
4807 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4808
4809 rtx
4810 alpha_gp_save_rtx (void)
4811 {
4812 rtx seq, m = cfun->machine->gp_save_rtx;
4813
4814 if (m == NULL)
4815 {
4816 start_sequence ();
4817
4818 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4819 m = validize_mem (m);
4820 emit_move_insn (m, pic_offset_table_rtx);
4821
4822 seq = get_insns ();
4823 end_sequence ();
4824
4825 /* We used to simply emit the sequence after entry_of_function.
4826       However, this breaks the CFG if the first instruction in the
4827 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4828 label. Emit the sequence properly on the edge. We are only
4829 invoked from dw2_build_landing_pads and finish_eh_generation
4830 will call commit_edge_insertions thanks to a kludge. */
4831 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4832
4833 cfun->machine->gp_save_rtx = m;
4834 }
4835
4836 return m;
4837 }
4838
4839 static int
4840 alpha_ra_ever_killed (void)
4841 {
4842 rtx top;
4843
4844 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4845 return (int)df_regs_ever_live_p (REG_RA);
4846
4847 push_topmost_sequence ();
4848 top = get_insns ();
4849 pop_topmost_sequence ();
4850
4851 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4852 }
4853
4854 \f
4855 /* Return the trap mode suffix applicable to the current
4856 instruction, or NULL. */
4857
4858 static const char *
4859 get_trap_mode_suffix (void)
4860 {
4861 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4862
4863 switch (s)
4864 {
4865 case TRAP_SUFFIX_NONE:
4866 return NULL;
4867
4868 case TRAP_SUFFIX_SU:
4869 if (alpha_fptm >= ALPHA_FPTM_SU)
4870 return "su";
4871 return NULL;
4872
4873 case TRAP_SUFFIX_SUI:
4874 if (alpha_fptm >= ALPHA_FPTM_SUI)
4875 return "sui";
4876 return NULL;
4877
4878 case TRAP_SUFFIX_V_SV:
4879 switch (alpha_fptm)
4880 {
4881 case ALPHA_FPTM_N:
4882 return NULL;
4883 case ALPHA_FPTM_U:
4884 return "v";
4885 case ALPHA_FPTM_SU:
4886 case ALPHA_FPTM_SUI:
4887 return "sv";
4888 default:
4889 gcc_unreachable ();
4890 }
4891
4892 case TRAP_SUFFIX_V_SV_SVI:
4893 switch (alpha_fptm)
4894 {
4895 case ALPHA_FPTM_N:
4896 return NULL;
4897 case ALPHA_FPTM_U:
4898 return "v";
4899 case ALPHA_FPTM_SU:
4900 return "sv";
4901 case ALPHA_FPTM_SUI:
4902 return "svi";
4903 default:
4904 gcc_unreachable ();
4905 }
4906 break;
4907
4908 case TRAP_SUFFIX_U_SU_SUI:
4909 switch (alpha_fptm)
4910 {
4911 case ALPHA_FPTM_N:
4912 return NULL;
4913 case ALPHA_FPTM_U:
4914 return "u";
4915 case ALPHA_FPTM_SU:
4916 return "su";
4917 case ALPHA_FPTM_SUI:
4918 return "sui";
4919 default:
4920 gcc_unreachable ();
4921 }
4922 break;
4923
4924 default:
4925 gcc_unreachable ();
4926 }
4927 gcc_unreachable ();
4928 }
4929
4930 /* Return the rounding mode suffix applicable to the current
4931 instruction, or NULL. */
4932
4933 static const char *
4934 get_round_mode_suffix (void)
4935 {
4936 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4937
4938 switch (s)
4939 {
4940 case ROUND_SUFFIX_NONE:
4941 return NULL;
4942 case ROUND_SUFFIX_NORMAL:
4943 switch (alpha_fprm)
4944 {
4945 case ALPHA_FPRM_NORM:
4946 return NULL;
4947 case ALPHA_FPRM_MINF:
4948 return "m";
4949 case ALPHA_FPRM_CHOP:
4950 return "c";
4951 case ALPHA_FPRM_DYN:
4952 return "d";
4953 default:
4954 gcc_unreachable ();
4955 }
4956 break;
4957
4958 case ROUND_SUFFIX_C:
4959 return "c";
4960
4961 default:
4962 gcc_unreachable ();
4963 }
4964 gcc_unreachable ();
4965 }
4966
4967 /* Locate some local-dynamic symbol still in use by this function
4968 so that we can print its name in some movdi_er_tlsldm pattern. */
4969
4970 static int
4971 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4972 {
4973 rtx x = *px;
4974
4975 if (GET_CODE (x) == SYMBOL_REF
4976 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4977 {
4978 cfun->machine->some_ld_name = XSTR (x, 0);
4979 return 1;
4980 }
4981
4982 return 0;
4983 }
4984
4985 static const char *
4986 get_some_local_dynamic_name (void)
4987 {
4988 rtx insn;
4989
4990 if (cfun->machine->some_ld_name)
4991 return cfun->machine->some_ld_name;
4992
4993 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4994 if (INSN_P (insn)
4995 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4996 return cfun->machine->some_ld_name;
4997
4998 gcc_unreachable ();
4999 }
5000
5001 /* Print an operand. Recognize special options, documented below. */
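/* Editorial examples of a few of the codes handled below: "%s" applied
   to the constant 24 prints 3 (24 / 8 on a little-endian target);
   "%L" applied to 0x8765 prints -30875 (the low 16 bits, sign-extended);
   "%r" prints a register name, or "$31" for a zero constant.  */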
5002
5003 void
5004 print_operand (FILE *file, rtx x, int code)
5005 {
5006 int i;
5007
5008 switch (code)
5009 {
5010 case '~':
5011 /* Print the assembler name of the current function. */
5012 assemble_name (file, alpha_fnname);
5013 break;
5014
5015 case '&':
5016 assemble_name (file, get_some_local_dynamic_name ());
5017 break;
5018
5019 case '/':
5020 {
5021 const char *trap = get_trap_mode_suffix ();
5022 const char *round = get_round_mode_suffix ();
5023
5024 if (trap || round)
5025 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5026 (trap ? trap : ""), (round ? round : ""));
5027 break;
5028 }
5029
5030 case ',':
5031 /* Generates single precision instruction suffix. */
5032 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5033 break;
5034
5035 case '-':
5036 /* Generates double precision instruction suffix. */
5037 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5038 break;
5039
5040 case '#':
5041 if (alpha_this_literal_sequence_number == 0)
5042 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5043 fprintf (file, "%d", alpha_this_literal_sequence_number);
5044 break;
5045
5046 case '*':
5047 if (alpha_this_gpdisp_sequence_number == 0)
5048 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5049 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5050 break;
5051
5052 case 'H':
5053 if (GET_CODE (x) == HIGH)
5054 output_addr_const (file, XEXP (x, 0));
5055 else
5056 output_operand_lossage ("invalid %%H value");
5057 break;
5058
5059 case 'J':
5060 {
5061 const char *lituse;
5062
5063 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5064 {
5065 x = XVECEXP (x, 0, 0);
5066 lituse = "lituse_tlsgd";
5067 }
5068 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5069 {
5070 x = XVECEXP (x, 0, 0);
5071 lituse = "lituse_tlsldm";
5072 }
5073 else if (CONST_INT_P (x))
5074 lituse = "lituse_jsr";
5075 else
5076 {
5077 output_operand_lossage ("invalid %%J value");
5078 break;
5079 }
5080
5081 if (x != const0_rtx)
5082 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5083 }
5084 break;
5085
5086 case 'j':
5087 {
5088 const char *lituse;
5089
5090 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5091 lituse = "lituse_jsrdirect";
5092 #else
5093 lituse = "lituse_jsr";
5094 #endif
5095
5096 gcc_assert (INTVAL (x) != 0);
5097 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5098 }
5099 break;
5100 case 'r':
5101 /* If this operand is the constant zero, write it as "$31". */
5102 if (REG_P (x))
5103 fprintf (file, "%s", reg_names[REGNO (x)]);
5104 else if (x == CONST0_RTX (GET_MODE (x)))
5105 fprintf (file, "$31");
5106 else
5107 output_operand_lossage ("invalid %%r value");
5108 break;
5109
5110 case 'R':
5111 /* Similar, but for floating-point. */
5112 if (REG_P (x))
5113 fprintf (file, "%s", reg_names[REGNO (x)]);
5114 else if (x == CONST0_RTX (GET_MODE (x)))
5115 fprintf (file, "$f31");
5116 else
5117 output_operand_lossage ("invalid %%R value");
5118 break;
5119
5120 case 'N':
5121 /* Write the 1's complement of a constant. */
5122 if (!CONST_INT_P (x))
5123 output_operand_lossage ("invalid %%N value");
5124
5125 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5126 break;
5127
5128 case 'P':
5129 /* Write 1 << C, for a constant C. */
5130 if (!CONST_INT_P (x))
5131 output_operand_lossage ("invalid %%P value");
5132
5133 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5134 break;
5135
5136 case 'h':
5137 /* Write the high-order 16 bits of a constant, sign-extended. */
5138 if (!CONST_INT_P (x))
5139 output_operand_lossage ("invalid %%h value");
5140
5141 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5142 break;
5143
5144 case 'L':
5145 /* Write the low-order 16 bits of a constant, sign-extended. */
5146 if (!CONST_INT_P (x))
5147 output_operand_lossage ("invalid %%L value");
5148
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5150 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5151 break;
5152
5153 case 'm':
5154 /* Write mask for ZAP insn. */
5155 if (GET_CODE (x) == CONST_DOUBLE)
5156 {
5157 HOST_WIDE_INT mask = 0;
5158 HOST_WIDE_INT value;
5159
5160 value = CONST_DOUBLE_LOW (x);
5161 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5162 i++, value >>= 8)
5163 if (value & 0xff)
5164 mask |= (1 << i);
5165
5166 value = CONST_DOUBLE_HIGH (x);
5167 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5168 i++, value >>= 8)
5169 if (value & 0xff)
5170 mask |= (1 << (i + sizeof (int)));
5171
5172 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5173 }
5174
5175 else if (CONST_INT_P (x))
5176 {
5177 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5178
5179 for (i = 0; i < 8; i++, value >>= 8)
5180 if (value & 0xff)
5181 mask |= (1 << i);
5182
5183 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5184 }
5185 else
5186 output_operand_lossage ("invalid %%m value");
5187 break;
5188
5189 case 'M':
5190       /* Write 'b', 'w', 'l', or 'q' as the constant is 8, 16, 32, or 64.  */
5191 if (!CONST_INT_P (x)
5192 || (INTVAL (x) != 8 && INTVAL (x) != 16
5193 && INTVAL (x) != 32 && INTVAL (x) != 64))
5194 output_operand_lossage ("invalid %%M value");
5195
5196 fprintf (file, "%s",
5197 (INTVAL (x) == 8 ? "b"
5198 : INTVAL (x) == 16 ? "w"
5199 : INTVAL (x) == 32 ? "l"
5200 : "q"));
5201 break;
5202
5203 case 'U':
5204 /* Similar, except do it from the mask. */
5205 if (CONST_INT_P (x))
5206 {
5207 HOST_WIDE_INT value = INTVAL (x);
5208
5209 if (value == 0xff)
5210 {
5211 fputc ('b', file);
5212 break;
5213 }
5214 if (value == 0xffff)
5215 {
5216 fputc ('w', file);
5217 break;
5218 }
5219 if (value == 0xffffffff)
5220 {
5221 fputc ('l', file);
5222 break;
5223 }
5224 if (value == -1)
5225 {
5226 fputc ('q', file);
5227 break;
5228 }
5229 }
5230 else if (HOST_BITS_PER_WIDE_INT == 32
5231 && GET_CODE (x) == CONST_DOUBLE
5232 && CONST_DOUBLE_LOW (x) == 0xffffffff
5233 && CONST_DOUBLE_HIGH (x) == 0)
5234 {
5235 fputc ('l', file);
5236 break;
5237 }
5238 output_operand_lossage ("invalid %%U value");
5239 break;
5240
5241 case 's':
5242 /* Write the constant value divided by 8 for little-endian mode or
5243 (56 - value) / 8 for big-endian mode. */
5244
5245 if (!CONST_INT_P (x)
5246 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5247 ? 56
5248 : 64)
5249 || (INTVAL (x) & 7) != 0)
5250 output_operand_lossage ("invalid %%s value");
5251
5252 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5253 WORDS_BIG_ENDIAN
5254 ? (56 - INTVAL (x)) / 8
5255 : INTVAL (x) / 8);
5256 break;
5257
5258 case 'S':
5259 /* Same, except compute (64 - c) / 8 */
5260
5261       if (!CONST_INT_P (x)
5262 	  || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5263 	  || (INTVAL (x) & 7) != 0)
5264 	output_operand_lossage ("invalid %%S value");
5265
5266 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5267 break;
5268
5269 case 't':
5270 {
5271 /* On Unicos/Mk systems: use a DEX expression if the symbol
5272 clashes with a register name. */
5273 int dex = unicosmk_need_dex (x);
5274 if (dex)
5275 fprintf (file, "DEX(%d)", dex);
5276 else
5277 output_addr_const (file, x);
5278 }
5279 break;
5280
5281 case 'C': case 'D': case 'c': case 'd':
5282 /* Write out comparison name. */
5283 {
5284 enum rtx_code c = GET_CODE (x);
5285
5286 if (!COMPARISON_P (x))
5287 output_operand_lossage ("invalid %%C value");
5288
5289 else if (code == 'D')
5290 c = reverse_condition (c);
5291 else if (code == 'c')
5292 c = swap_condition (c);
5293 else if (code == 'd')
5294 c = swap_condition (reverse_condition (c));
5295
5296 if (c == LEU)
5297 fprintf (file, "ule");
5298 else if (c == LTU)
5299 fprintf (file, "ult");
5300 else if (c == UNORDERED)
5301 fprintf (file, "un");
5302 else
5303 fprintf (file, "%s", GET_RTX_NAME (c));
5304 }
5305 break;
5306
5307 case 'E':
5308 /* Write the divide or modulus operator. */
5309 switch (GET_CODE (x))
5310 {
5311 case DIV:
5312 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5313 break;
5314 case UDIV:
5315 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5316 break;
5317 case MOD:
5318 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5319 break;
5320 case UMOD:
5321 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5322 break;
5323 default:
5324 output_operand_lossage ("invalid %%E value");
5325 break;
5326 }
5327 break;
5328
5329 case 'A':
5330 /* Write "_u" for unaligned access. */
5331 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5332 fprintf (file, "_u");
5333 break;
5334
5335 case 0:
5336 if (REG_P (x))
5337 fprintf (file, "%s", reg_names[REGNO (x)]);
5338 else if (MEM_P (x))
5339 output_address (XEXP (x, 0));
5340 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5341 {
5342 switch (XINT (XEXP (x, 0), 1))
5343 {
5344 case UNSPEC_DTPREL:
5345 case UNSPEC_TPREL:
5346 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5347 break;
5348 default:
5349 output_operand_lossage ("unknown relocation unspec");
5350 break;
5351 }
5352 }
5353 else
5354 output_addr_const (file, x);
5355 break;
5356
5357 default:
5358 output_operand_lossage ("invalid %%xn code");
5359 }
5360 }
5361
5362 void
5363 print_operand_address (FILE *file, rtx addr)
5364 {
5365 int basereg = 31;
5366 HOST_WIDE_INT offset = 0;
5367
5368 if (GET_CODE (addr) == AND)
5369 addr = XEXP (addr, 0);
5370
5371 if (GET_CODE (addr) == PLUS
5372 && CONST_INT_P (XEXP (addr, 1)))
5373 {
5374 offset = INTVAL (XEXP (addr, 1));
5375 addr = XEXP (addr, 0);
5376 }
5377
5378 if (GET_CODE (addr) == LO_SUM)
5379 {
5380 const char *reloc16, *reloclo;
5381 rtx op1 = XEXP (addr, 1);
5382
5383 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5384 {
5385 op1 = XEXP (op1, 0);
5386 switch (XINT (op1, 1))
5387 {
5388 case UNSPEC_DTPREL:
5389 reloc16 = NULL;
5390 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5391 break;
5392 case UNSPEC_TPREL:
5393 reloc16 = NULL;
5394 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5395 break;
5396 default:
5397 output_operand_lossage ("unknown relocation unspec");
5398 return;
5399 }
5400
5401 output_addr_const (file, XVECEXP (op1, 0, 0));
5402 }
5403 else
5404 {
5405 reloc16 = "gprel";
5406 reloclo = "gprellow";
5407 output_addr_const (file, op1);
5408 }
5409
5410 if (offset)
5411 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5412
5413 addr = XEXP (addr, 0);
5414 switch (GET_CODE (addr))
5415 {
5416 case REG:
5417 basereg = REGNO (addr);
5418 break;
5419
5420 case SUBREG:
5421 basereg = subreg_regno (addr);
5422 break;
5423
5424 default:
5425 gcc_unreachable ();
5426 }
5427
5428 fprintf (file, "($%d)\t\t!%s", basereg,
5429 (basereg == 29 ? reloc16 : reloclo));
5430 return;
5431 }
5432
5433 switch (GET_CODE (addr))
5434 {
5435 case REG:
5436 basereg = REGNO (addr);
5437 break;
5438
5439 case SUBREG:
5440 basereg = subreg_regno (addr);
5441 break;
5442
5443 case CONST_INT:
5444 offset = INTVAL (addr);
5445 break;
5446
5447 #if TARGET_ABI_OPEN_VMS
5448 case SYMBOL_REF:
5449 fprintf (file, "%s", XSTR (addr, 0));
5450 return;
5451
5452 case CONST:
5453 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5454 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5455 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5456 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5457 INTVAL (XEXP (XEXP (addr, 0), 1)));
5458 return;
5459
5460 #endif
5461 default:
5462 gcc_unreachable ();
5463 }
5464
5465 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5466 }
5467 \f
5468 /* Emit RTL insns to initialize the variable parts of a trampoline at
5469 TRAMP. FNADDR is an RTX for the address of the function's pure
5470 code. CXT is an RTX for the static chain value for the function.
5471
5472 The three offset parameters are for the individual template's
5473 layout. A JMPOFS < 0 indicates that the trampoline does not
5474 contain instructions at all.
5475
5476 We assume here that a function will be called many more times than
5477 its address is taken (e.g., it might be passed to qsort), so we
5478 take the trouble to initialize the "hint" field in the JMP insn.
5479 Note that the hint field is PC (new) + 4 * bits 13:0. */
5480
5481 void
5482 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5483 int fnofs, int cxtofs, int jmpofs)
5484 {
5485 rtx addr;
5486 /* VMS really uses DImode pointers in memory at this point. */
5487 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5488
5489 #ifdef POINTERS_EXTEND_UNSIGNED
5490 fnaddr = convert_memory_address (mode, fnaddr);
5491 cxt = convert_memory_address (mode, cxt);
5492 #endif
5493
5494 if (TARGET_ABI_OPEN_VMS)
5495 {
5496 rtx temp1, traddr;
5497 const char *fnname;
5498 char *trname;
5499
5500 /* Construct the name of the trampoline entry point. */
5501 fnname = XSTR (fnaddr, 0);
5502 trname = (char *) alloca (strlen (fnname) + 5);
5503 strcpy (trname, fnname);
5504 strcat (trname, "..tr");
5505 traddr = gen_rtx_SYMBOL_REF
5506 (mode, ggc_alloc_string (trname, strlen (trname) + 1));
5507
5508 /* Trampoline (or "bounded") procedure descriptor is constructed from
5509        the function's procedure descriptor with certain fields zeroed in accordance
5510        with the VMS calling standard.  This is stored in the first quadword.  */
5511 temp1 = force_reg (DImode, gen_rtx_MEM (DImode, fnaddr));
5512 temp1 = expand_and (DImode, temp1,
5513 GEN_INT (0xffff0fff0000fff0), NULL_RTX);
5514 addr = memory_address (mode, plus_constant (tramp, 0));
5515 emit_move_insn (gen_rtx_MEM (DImode, addr), temp1);
5516
5517 /* Trampoline transfer address is stored in the second quadword
5518 of the trampoline. */
5519 addr = memory_address (mode, plus_constant (tramp, 8));
5520 emit_move_insn (gen_rtx_MEM (mode, addr), traddr);
5521 }
5522
5523 /* Store function address and CXT. */
5524 addr = memory_address (mode, plus_constant (tramp, fnofs));
5525 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5526 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5527 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5528
5529 #ifdef ENABLE_EXECUTE_STACK
5530 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5531 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
5532 #endif
5533
5534 if (jmpofs >= 0)
5535 emit_insn (gen_imb ());
5536 }
5537 \f
5538 /* Determine where to put an argument to a function.
5539 Value is zero to push the argument on the stack,
5540 or a hard register in which to store the argument.
5541
5542 MODE is the argument's machine mode.
5543 TYPE is the data type of the argument (as a tree).
5544 This is null for libcalls where that information may
5545 not be available.
5546 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5547 the preceding args and about the function being called.
5548 NAMED is nonzero if this argument is a named parameter
5549 (otherwise it is an extra parameter matching an ellipsis).
5550
5551 On Alpha the first 6 words of args are normally in registers
5552 and the rest are pushed. */
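/* Editorial note: under the OSF/1 ABI this means integer arguments in
   slots 0..5 land in $16..$21 and floating-point arguments in
   $f16..$f21; registers 32..63 are the FP registers, so the basereg
   value 32 + 16 used below corresponds to $f16.  */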
5553
5554 rtx
5555 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5556 int named ATTRIBUTE_UNUSED)
5557 {
5558 int basereg;
5559 int num_args;
5560
5561 /* Don't get confused and pass small structures in FP registers. */
5562 if (type && AGGREGATE_TYPE_P (type))
5563 basereg = 16;
5564 else
5565 {
5566 #ifdef ENABLE_CHECKING
5567 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5568 values here. */
5569 gcc_assert (!COMPLEX_MODE_P (mode));
5570 #endif
5571
5572 /* Set up defaults for FP operands passed in FP registers, and
5573 integral operands passed in integer registers. */
5574 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5575 basereg = 32 + 16;
5576 else
5577 basereg = 16;
5578 }
5579
5580 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5581 the three platforms, so we can't avoid conditional compilation. */
5582 #if TARGET_ABI_OPEN_VMS
5583 {
5584 if (mode == VOIDmode)
5585 return alpha_arg_info_reg_val (cum);
5586
5587 num_args = cum.num_args;
5588 if (num_args >= 6
5589 || targetm.calls.must_pass_in_stack (mode, type))
5590 return NULL_RTX;
5591 }
5592 #elif TARGET_ABI_UNICOSMK
5593 {
5594 int size;
5595
5596 /* If this is the last argument, generate the call info word (CIW). */
5597 /* ??? We don't include the caller's line number in the CIW because
5598      I don't know how to determine it if debug info is turned off.  */
5599 if (mode == VOIDmode)
5600 {
5601 int i;
5602 HOST_WIDE_INT lo;
5603 HOST_WIDE_INT hi;
5604 rtx ciw;
5605
5606 lo = 0;
5607
5608 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5609 if (cum.reg_args_type[i])
5610 lo |= (1 << (7 - i));
5611
5612 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5613 lo |= 7;
5614 else
5615 lo |= cum.num_reg_words;
5616
5617 #if HOST_BITS_PER_WIDE_INT == 32
5618 hi = (cum.num_args << 20) | cum.num_arg_words;
5619 #else
5620 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5621 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5622 hi = 0;
5623 #endif
5624 ciw = immed_double_const (lo, hi, DImode);
5625
5626 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5627 UNSPEC_UMK_LOAD_CIW);
5628 }
5629
5630 size = ALPHA_ARG_SIZE (mode, type, named);
5631 num_args = cum.num_reg_words;
5632 if (cum.force_stack
5633 || cum.num_reg_words + size > 6
5634 || targetm.calls.must_pass_in_stack (mode, type))
5635 return NULL_RTX;
5636 else if (type && TYPE_MODE (type) == BLKmode)
5637 {
5638 rtx reg1, reg2;
5639
5640 reg1 = gen_rtx_REG (DImode, num_args + 16);
5641 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5642
5643 /* The argument fits in two registers. Note that we still need to
5644 reserve a register for empty structures. */
5645 if (size == 0)
5646 return NULL_RTX;
5647 else if (size == 1)
5648 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5649 else
5650 {
5651 reg2 = gen_rtx_REG (DImode, num_args + 17);
5652 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5653 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5654 }
5655 }
5656 }
5657 #elif TARGET_ABI_OSF
5658 {
5659 if (cum >= 6)
5660 return NULL_RTX;
5661 num_args = cum;
5662
5663 /* VOID is passed as a special flag for "last argument". */
5664 if (type == void_type_node)
5665 basereg = 16;
5666 else if (targetm.calls.must_pass_in_stack (mode, type))
5667 return NULL_RTX;
5668 }
5669 #else
5670 #error Unhandled ABI
5671 #endif
5672
5673 return gen_rtx_REG (mode, num_args + basereg);
5674 }
5675
5676 static int
5677 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5678 enum machine_mode mode ATTRIBUTE_UNUSED,
5679 tree type ATTRIBUTE_UNUSED,
5680 bool named ATTRIBUTE_UNUSED)
5681 {
5682 int words = 0;
5683
5684 #if TARGET_ABI_OPEN_VMS
5685 if (cum->num_args < 6
5686 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5687 words = 6 - cum->num_args;
5688 #elif TARGET_ABI_UNICOSMK
5689 /* Never any split arguments. */
5690 #elif TARGET_ABI_OSF
5691 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5692 words = 6 - *cum;
5693 #else
5694 #error Unhandled ABI
5695 #endif
5696
5697 return words * UNITS_PER_WORD;
5698 }
5699
5700
5701 /* Return true if TYPE must be returned in memory, instead of in registers. */
5702
5703 static bool
5704 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5705 {
5706 enum machine_mode mode = VOIDmode;
5707 int size;
5708
5709 if (type)
5710 {
5711 mode = TYPE_MODE (type);
5712
5713 /* All aggregates are returned in memory. */
5714 if (AGGREGATE_TYPE_P (type))
5715 return true;
5716 }
5717
5718 size = GET_MODE_SIZE (mode);
5719 switch (GET_MODE_CLASS (mode))
5720 {
5721 case MODE_VECTOR_FLOAT:
5722 /* Pass all float vectors in memory, like an aggregate. */
5723 return true;
5724
5725 case MODE_COMPLEX_FLOAT:
5726 /* We judge complex floats on the size of their element,
5727 not the size of the whole type. */
5728 size = GET_MODE_UNIT_SIZE (mode);
5729 break;
5730
5731 case MODE_INT:
5732 case MODE_FLOAT:
5733 case MODE_COMPLEX_INT:
5734 case MODE_VECTOR_INT:
5735 break;
5736
5737 default:
5738 /* ??? We get called on all sorts of random stuff from
5739 aggregate_value_p. We must return something, but it's not
5740 clear what's safe to return. Pretend it's a struct I
5741 guess. */
5742 return true;
5743 }
5744
5745 /* Otherwise types must fit in one register. */
5746 return size > UNITS_PER_WORD;
5747 }
5748
5749 /* Return true if TYPE should be passed by invisible reference. */
5750
5751 static bool
5752 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5753 enum machine_mode mode,
5754 const_tree type ATTRIBUTE_UNUSED,
5755 bool named ATTRIBUTE_UNUSED)
5756 {
5757 return mode == TFmode || mode == TCmode;
5758 }
5759
5760 /* Define how to find the value returned by a function. VALTYPE is the
5761 data type of the value (as a tree). If the precise function being
5762 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5763 MODE is set instead of VALTYPE for libcalls.
5764
5765 On Alpha the value is found in $0 for integer functions and
5766 $f0 for floating-point functions. */
5767
5768 rtx
5769 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5770 enum machine_mode mode)
5771 {
5772 unsigned int regnum, dummy;
5773 enum mode_class mclass;
5774
5775 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5776
5777 if (valtype)
5778 mode = TYPE_MODE (valtype);
5779
5780 mclass = GET_MODE_CLASS (mode);
5781 switch (mclass)
5782 {
5783 case MODE_INT:
5784 PROMOTE_MODE (mode, dummy, valtype);
5785 /* FALLTHRU */
5786
5787 case MODE_COMPLEX_INT:
5788 case MODE_VECTOR_INT:
5789 regnum = 0;
5790 break;
5791
5792 case MODE_FLOAT:
5793 regnum = 32;
5794 break;
5795
5796 case MODE_COMPLEX_FLOAT:
5797 {
5798 enum machine_mode cmode = GET_MODE_INNER (mode);
5799
5800 return gen_rtx_PARALLEL
5801 (VOIDmode,
5802 gen_rtvec (2,
5803 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5804 const0_rtx),
5805 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5806 GEN_INT (GET_MODE_SIZE (cmode)))));
5807 }
5808
5809 default:
5810 gcc_unreachable ();
5811 }
5812
5813 return gen_rtx_REG (mode, regnum);
5814 }
5815
5816 /* TCmode complex values are passed by invisible reference. We
5817 should not split these values. */
5818
5819 static bool
5820 alpha_split_complex_arg (const_tree type)
5821 {
5822 return TYPE_MODE (type) != TCmode;
5823 }
5824
5825 static tree
5826 alpha_build_builtin_va_list (void)
5827 {
5828 tree base, ofs, space, record, type_decl;
5829
5830 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5831 return ptr_type_node;
5832
5833 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5834 type_decl = build_decl (BUILTINS_LOCATION,
5835 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5836 TREE_CHAIN (record) = type_decl;
5837 TYPE_NAME (record) = type_decl;
5838
5839 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5840
5841 /* Dummy field to prevent alignment warnings. */
5842 space = build_decl (BUILTINS_LOCATION,
5843 FIELD_DECL, NULL_TREE, integer_type_node);
5844 DECL_FIELD_CONTEXT (space) = record;
5845 DECL_ARTIFICIAL (space) = 1;
5846 DECL_IGNORED_P (space) = 1;
5847
5848 ofs = build_decl (BUILTINS_LOCATION,
5849 FIELD_DECL, get_identifier ("__offset"),
5850 integer_type_node);
5851 DECL_FIELD_CONTEXT (ofs) = record;
5852 TREE_CHAIN (ofs) = space;
5853
5854 base = build_decl (BUILTINS_LOCATION,
5855 FIELD_DECL, get_identifier ("__base"),
5856 ptr_type_node);
5857 DECL_FIELD_CONTEXT (base) = record;
5858 TREE_CHAIN (base) = ofs;
5859
5860 TYPE_FIELDS (record) = base;
5861 layout_type (record);
5862
5863 va_list_gpr_counter_field = ofs;
5864 return record;
5865 }
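/* In C terms the record built above is roughly equivalent to this
   editorial sketch (__unnamed_pad stands in for the unnamed dummy
   FIELD_DECL):

     struct __va_list_tag
     {
       void *__base;        -- start of the argument save area
       int __offset;        -- byte offset of the next argument
       int __unnamed_pad;   -- dummy field; silences alignment warnings
     };
*/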
5866
5867 #if TARGET_ABI_OSF
5868 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5869 and constant additions. */
5870
5871 static gimple
5872 va_list_skip_additions (tree lhs)
5873 {
5874 gimple stmt;
5875
5876 for (;;)
5877 {
5878 enum tree_code code;
5879
5880 stmt = SSA_NAME_DEF_STMT (lhs);
5881
5882 if (gimple_code (stmt) == GIMPLE_PHI)
5883 return stmt;
5884
5885 if (!is_gimple_assign (stmt)
5886 || gimple_assign_lhs (stmt) != lhs)
5887 return NULL;
5888
5889 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5890 return stmt;
5891 code = gimple_assign_rhs_code (stmt);
5892 if (!CONVERT_EXPR_CODE_P (code)
5893 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5894 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5895 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5896 return stmt;
5897
5898 lhs = gimple_assign_rhs1 (stmt);
5899 }
5900 }
5901
5902 /* Check if LHS = RHS statement is
5903 LHS = *(ap.__base + ap.__offset + cst)
5904 or
5905 LHS = *(ap.__base
5906 + ((ap.__offset + cst <= 47)
5907 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5908 If the former, indicate that GPR registers are needed,
5909 if the latter, indicate that FPR registers are needed.
5910
5911 Also look for LHS = (*ptr).field, where ptr is one of the forms
5912 listed above.
5913
5914 On alpha, cfun->va_list_gpr_size is used as size of the needed
5915 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5916 registers are needed and bit 1 set if FPR registers are needed.
5917 Return true if va_list references should not be scanned for the
5918 current statement. */
5919
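/* Editorial illustration: for varargs code such as

     int i = va_arg (ap, int);
     double d = va_arg (ap, double);

   the integer access matches the first (GPR) pattern above and the
   double access matches the second (FPR, offset-minus-48) pattern, so
   the hook records that GPR and FPR argument registers, respectively,
   need to be saved.  */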
5920 static bool
5921 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5922 {
5923 tree base, offset, rhs;
5924 int offset_arg = 1;
5925 gimple base_stmt;
5926
5927 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5928 != GIMPLE_SINGLE_RHS)
5929 return false;
5930
5931 rhs = gimple_assign_rhs1 (stmt);
5932 while (handled_component_p (rhs))
5933 rhs = TREE_OPERAND (rhs, 0);
5934 if (TREE_CODE (rhs) != INDIRECT_REF
5935 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5936 return false;
5937
5938 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5939 if (stmt == NULL
5940 || !is_gimple_assign (stmt)
5941 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5942 return false;
5943
5944 base = gimple_assign_rhs1 (stmt);
5945 if (TREE_CODE (base) == SSA_NAME)
5946 {
5947 base_stmt = va_list_skip_additions (base);
5948 if (base_stmt
5949 && is_gimple_assign (base_stmt)
5950 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5951 base = gimple_assign_rhs1 (base_stmt);
5952 }
5953
5954 if (TREE_CODE (base) != COMPONENT_REF
5955 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5956 {
5957 base = gimple_assign_rhs2 (stmt);
5958 if (TREE_CODE (base) == SSA_NAME)
5959 {
5960 base_stmt = va_list_skip_additions (base);
5961 if (base_stmt
5962 && is_gimple_assign (base_stmt)
5963 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5964 base = gimple_assign_rhs1 (base_stmt);
5965 }
5966
5967 if (TREE_CODE (base) != COMPONENT_REF
5968 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5969 return false;
5970
5971 offset_arg = 0;
5972 }
5973
5974 base = get_base_address (base);
5975 if (TREE_CODE (base) != VAR_DECL
5976 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5977 return false;
5978
5979 offset = gimple_op (stmt, 1 + offset_arg);
5980 if (TREE_CODE (offset) == SSA_NAME)
5981 {
5982 gimple offset_stmt = va_list_skip_additions (offset);
5983
5984 if (offset_stmt
5985 && gimple_code (offset_stmt) == GIMPLE_PHI)
5986 {
5987 HOST_WIDE_INT sub;
5988 gimple arg1_stmt, arg2_stmt;
5989 tree arg1, arg2;
5990 enum tree_code code1, code2;
5991
5992 if (gimple_phi_num_args (offset_stmt) != 2)
5993 goto escapes;
5994
5995 arg1_stmt
5996 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5997 arg2_stmt
5998 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5999 if (arg1_stmt == NULL
6000 || !is_gimple_assign (arg1_stmt)
6001 || arg2_stmt == NULL
6002 || !is_gimple_assign (arg2_stmt))
6003 goto escapes;
6004
6005 code1 = gimple_assign_rhs_code (arg1_stmt);
6006 code2 = gimple_assign_rhs_code (arg2_stmt);
6007 if (code1 == COMPONENT_REF
6008 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6009 /* Do nothing. */;
6010 else if (code2 == COMPONENT_REF
6011 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6012 {
6013 gimple tem = arg1_stmt;
6014 code2 = code1;
6015 arg1_stmt = arg2_stmt;
6016 arg2_stmt = tem;
6017 }
6018 else
6019 goto escapes;
6020
6021 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6022 goto escapes;
6023
6024 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6025 if (code2 == MINUS_EXPR)
6026 sub = -sub;
6027 if (sub < -48 || sub > -32)
6028 goto escapes;
6029
6030 arg1 = gimple_assign_rhs1 (arg1_stmt);
6031 arg2 = gimple_assign_rhs1 (arg2_stmt);
6032 if (TREE_CODE (arg2) == SSA_NAME)
6033 {
6034 arg2_stmt = va_list_skip_additions (arg2);
6035 if (arg2_stmt == NULL
6036 || !is_gimple_assign (arg2_stmt)
6037 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6038 goto escapes;
6039 arg2 = gimple_assign_rhs1 (arg2_stmt);
6040 }
6041 if (arg1 != arg2)
6042 goto escapes;
6043
6044 if (TREE_CODE (arg1) != COMPONENT_REF
6045 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6046 || get_base_address (arg1) != base)
6047 goto escapes;
6048
6049 /* Need floating point regs. */
6050 cfun->va_list_fpr_size |= 2;
6051 return false;
6052 }
6053 if (offset_stmt
6054 && is_gimple_assign (offset_stmt)
6055 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6056 offset = gimple_assign_rhs1 (offset_stmt);
6057 }
6058 if (TREE_CODE (offset) != COMPONENT_REF
6059 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6060 || get_base_address (offset) != base)
6061 goto escapes;
6062 else
6063 /* Need general regs. */
6064 cfun->va_list_fpr_size |= 1;
6065 return false;
6066
6067 escapes:
6068 si->va_list_escapes = true;
6069 return false;
6070 }
6071 #endif
6072
6073 /* Perform any actions needed for a function that is receiving a
6074 variable number of arguments. */
6075
6076 static void
6077 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6078 tree type, int *pretend_size, int no_rtl)
6079 {
6080 CUMULATIVE_ARGS cum = *pcum;
6081
6082 /* Skip the current argument. */
6083 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6084
6085 #if TARGET_ABI_UNICOSMK
6086 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6087 arguments on the stack. Unfortunately, it doesn't always store the first
6088 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6089 with stdargs as we always have at least one named argument there. */
6090 if (cum.num_reg_words < 6)
6091 {
6092 if (!no_rtl)
6093 {
6094 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6095 emit_insn (gen_arg_home_umk ());
6096 }
6097 *pretend_size = 0;
6098 }
6099 #elif TARGET_ABI_OPEN_VMS
6100 /* For VMS, we allocate space for all 6 arg registers plus a count.
6101
6102 However, if NO registers need to be saved, don't allocate any space.
6103 This is not only because we won't need the space, but because AP
6104 includes the current_pretend_args_size and we don't want to mess up
6105 any ap-relative addresses already made. */
6106 if (cum.num_args < 6)
6107 {
6108 if (!no_rtl)
6109 {
6110 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6111 emit_insn (gen_arg_home ());
6112 }
6113 *pretend_size = 7 * UNITS_PER_WORD;
6114 }
6115 #else
6116 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6117 only push those that are remaining. However, if NO registers need to
6118 be saved, don't allocate any space. This is not only because we won't
6119 need the space, but because AP includes the current_pretend_args_size
6120 and we don't want to mess up any ap-relative addresses already made.
6121
6122 If we are not to use the floating-point registers, save the integer
6123 registers where we would put the floating-point registers. This is
6124 not the most efficient way to implement varargs with just one register
6125 class, but it isn't worth doing anything more efficient in this rare
6126 case. */
6127 if (cum >= 6)
6128 return;
6129
6130 if (!no_rtl)
6131 {
6132 int count;
6133 alias_set_type set = get_varargs_alias_set ();
6134 rtx tmp;
6135
6136 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6137 if (count > 6 - cum)
6138 count = 6 - cum;
6139
6140 /* Detect whether integer registers or floating-point registers
6141 are needed by the detected va_arg statements. See above for
6142 how these values are computed. Note that the "escape" value
6143 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6144 these bits set. */
6145 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6146
6147 if (cfun->va_list_fpr_size & 1)
6148 {
6149 tmp = gen_rtx_MEM (BLKmode,
6150 plus_constant (virtual_incoming_args_rtx,
6151 (cum + 6) * UNITS_PER_WORD));
6152 MEM_NOTRAP_P (tmp) = 1;
6153 set_mem_alias_set (tmp, set);
6154 move_block_from_reg (16 + cum, tmp, count);
6155 }
6156
6157 if (cfun->va_list_fpr_size & 2)
6158 {
6159 tmp = gen_rtx_MEM (BLKmode,
6160 plus_constant (virtual_incoming_args_rtx,
6161 cum * UNITS_PER_WORD));
6162 MEM_NOTRAP_P (tmp) = 1;
6163 set_mem_alias_set (tmp, set);
6164 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6165 }
6166 }
6167 *pretend_size = 12 * UNITS_PER_WORD;
6168 #endif
6169 }
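
/* Illustrative sketch, standalone and not referenced by the compiler: the
   OSF/1 save-block offsets used just above, restated in plain C.  The helper
   name and parameters are hypothetical.  With CUM named argument slots
   already consumed, remaining FP argument registers (when FP registers are
   available) are stored starting at byte offset CUM*8 from the incoming
   virtual argument pointer, and remaining integer argument registers are
   stored 48 bytes higher, matching the layout the alpha_va_start comment
   below describes.  */

static long
example_osf_vararg_save_offset (int cum, int k, int is_fp_reg)
{
  /* UNITS_PER_WORD is 8 on Alpha; FP registers occupy the low 48 bytes
     of the block, integer registers the next 48 bytes.  */
  return (is_fp_reg ? cum + k : cum + k + 6) * 8;
}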
6170
6171 static void
6172 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6173 {
6174 HOST_WIDE_INT offset;
6175 tree t, offset_field, base_field;
6176
6177 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6178 return;
6179
6180 if (TARGET_ABI_UNICOSMK)
6181 std_expand_builtin_va_start (valist, nextarg);
6182
6183 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6184 up by 48, storing fp arg registers in the first 48 bytes, and the
6185 integer arg registers in the next 48 bytes. This is only done,
6186 however, if any integer registers need to be stored.
6187
6188 If no integer registers need be stored, then we must subtract 48
6189 in order to account for the integer arg registers which are counted
6190 in argsize above, but which are not actually stored on the stack.
6191 Must further be careful here about structures straddling the last
6192 integer argument register; that futzes with pretend_args_size,
6193 which changes the meaning of AP. */
6194
6195 if (NUM_ARGS < 6)
6196 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6197 else
6198 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6199
6200 if (TARGET_ABI_OPEN_VMS)
6201 {
6202 nextarg = plus_constant (nextarg, offset);
6203 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6204 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6205 make_tree (ptr_type_node, nextarg));
6206 TREE_SIDE_EFFECTS (t) = 1;
6207
6208 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6209 }
6210 else
6211 {
6212 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6213 offset_field = TREE_CHAIN (base_field);
6214
6215 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6216 valist, base_field, NULL_TREE);
6217 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6218 valist, offset_field, NULL_TREE);
6219
6220 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6221 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6222 size_int (offset));
6223 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6224 TREE_SIDE_EFFECTS (t) = 1;
6225 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6226
6227 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6228 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6229 TREE_SIDE_EFFECTS (t) = 1;
6230 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6231 }
6232 }
6233
6234 static tree
6235 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6236 gimple_seq *pre_p)
6237 {
6238 tree type_size, ptr_type, addend, t, addr;
6239 gimple_seq internal_post;
6240
6241 /* If the type could not be passed in registers, skip the block
6242 reserved for the registers. */
6243 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6244 {
6245 t = build_int_cst (TREE_TYPE (offset), 6*8);
6246 gimplify_assign (offset,
6247 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6248 pre_p);
6249 }
6250
6251 addend = offset;
6252 ptr_type = build_pointer_type (type);
6253
6254 if (TREE_CODE (type) == COMPLEX_TYPE)
6255 {
6256 tree real_part, imag_part, real_temp;
6257
6258 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6259 offset, pre_p);
6260
6261 /* Copy the value into a new temporary, lest the formal temporary
6262 be reused out from under us. */
6263 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6264
6265 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6266 offset, pre_p);
6267
6268 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6269 }
6270 else if (TREE_CODE (type) == REAL_TYPE)
6271 {
6272 tree fpaddend, cond, fourtyeight;
6273
6274 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6275 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6276 addend, fourtyeight);
6277 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6278 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6279 fpaddend, addend);
6280 }
6281
6282 /* Build the final address and force that value into a temporary. */
6283 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6284 fold_convert (sizetype, addend));
6285 internal_post = NULL;
6286 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6287 gimple_seq_add_seq (pre_p, internal_post);
6288
6289 /* Update the offset field. */
6290 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6291 if (type_size == NULL || TREE_OVERFLOW (type_size))
6292 t = size_zero_node;
6293 else
6294 {
6295 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6296 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6297 t = size_binop (MULT_EXPR, t, size_int (8));
6298 }
6299 t = fold_convert (TREE_TYPE (offset), t);
6300 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6301 pre_p);
6302
6303 return build_va_arg_indirect_ref (addr);
6304 }
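
/* Illustrative sketch, standalone and not referenced by the compiler: the
   address arithmetic generated above, restated in plain C.  The helper name
   is hypothetical; BASE plays the role of ap.__base, *POFFSET of
   ap.__offset, SIZE is the argument size in bytes, IS_FP says whether the
   argument has floating-point type, and MUST_BE_ON_STACK mirrors the
   must_pass_in_stack check.  The complex-type recursion is omitted.  */

static char *
example_va_arg_address (char *base, long *poffset, long size,
                        int is_fp, int must_be_on_stack)
{
  long offset = *poffset;
  char *addr;

  /* Arguments that could never be passed in registers skip the
     48-byte register block entirely.  */
  if (must_be_on_stack && offset < 48)
    offset = 48;

  /* FP arguments still within the register-passed region come from the
     FP save area, which sits 48 bytes below the integer save area.  */
  if (is_fp && offset < 48)
    addr = base + offset - 48;
  else
    addr = base + offset;

  /* Advance the offset by the argument size rounded up to 8 bytes.  */
  *poffset = offset + ((size + 7) / 8) * 8;
  return addr;
}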
6305
6306 static tree
6307 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6308 gimple_seq *post_p)
6309 {
6310 tree offset_field, base_field, offset, base, t, r;
6311 bool indirect;
6312
6313 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6314 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6315
6316 base_field = TYPE_FIELDS (va_list_type_node);
6317 offset_field = TREE_CHAIN (base_field);
6318 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6319 valist, base_field, NULL_TREE);
6320 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6321 valist, offset_field, NULL_TREE);
6322
6323 /* Pull the fields of the structure out into temporaries. Since we never
6324 modify the base field, we can use a formal temporary. Sign-extend the
6325 offset field so that it's the proper width for pointer arithmetic. */
6326 base = get_formal_tmp_var (base_field, pre_p);
6327
6328 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6329 offset = get_initialized_tmp_var (t, pre_p, NULL);
6330
6331 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6332 if (indirect)
6333 type = build_pointer_type (type);
6334
6335 /* Find the value. Note that this will be a stable indirection, or
6336 a composite of stable indirections in the case of complex. */
6337 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6338
6339 /* Stuff the offset temporary back into its field. */
6340 gimplify_assign (unshare_expr (offset_field),
6341 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6342
6343 if (indirect)
6344 r = build_va_arg_indirect_ref (r);
6345
6346 return r;
6347 }
6348 \f
6349 /* Builtins. */
6350
6351 enum alpha_builtin
6352 {
6353 ALPHA_BUILTIN_CMPBGE,
6354 ALPHA_BUILTIN_EXTBL,
6355 ALPHA_BUILTIN_EXTWL,
6356 ALPHA_BUILTIN_EXTLL,
6357 ALPHA_BUILTIN_EXTQL,
6358 ALPHA_BUILTIN_EXTWH,
6359 ALPHA_BUILTIN_EXTLH,
6360 ALPHA_BUILTIN_EXTQH,
6361 ALPHA_BUILTIN_INSBL,
6362 ALPHA_BUILTIN_INSWL,
6363 ALPHA_BUILTIN_INSLL,
6364 ALPHA_BUILTIN_INSQL,
6365 ALPHA_BUILTIN_INSWH,
6366 ALPHA_BUILTIN_INSLH,
6367 ALPHA_BUILTIN_INSQH,
6368 ALPHA_BUILTIN_MSKBL,
6369 ALPHA_BUILTIN_MSKWL,
6370 ALPHA_BUILTIN_MSKLL,
6371 ALPHA_BUILTIN_MSKQL,
6372 ALPHA_BUILTIN_MSKWH,
6373 ALPHA_BUILTIN_MSKLH,
6374 ALPHA_BUILTIN_MSKQH,
6375 ALPHA_BUILTIN_UMULH,
6376 ALPHA_BUILTIN_ZAP,
6377 ALPHA_BUILTIN_ZAPNOT,
6378 ALPHA_BUILTIN_AMASK,
6379 ALPHA_BUILTIN_IMPLVER,
6380 ALPHA_BUILTIN_RPCC,
6381 ALPHA_BUILTIN_THREAD_POINTER,
6382 ALPHA_BUILTIN_SET_THREAD_POINTER,
6383
6384 /* TARGET_MAX */
6385 ALPHA_BUILTIN_MINUB8,
6386 ALPHA_BUILTIN_MINSB8,
6387 ALPHA_BUILTIN_MINUW4,
6388 ALPHA_BUILTIN_MINSW4,
6389 ALPHA_BUILTIN_MAXUB8,
6390 ALPHA_BUILTIN_MAXSB8,
6391 ALPHA_BUILTIN_MAXUW4,
6392 ALPHA_BUILTIN_MAXSW4,
6393 ALPHA_BUILTIN_PERR,
6394 ALPHA_BUILTIN_PKLB,
6395 ALPHA_BUILTIN_PKWB,
6396 ALPHA_BUILTIN_UNPKBL,
6397 ALPHA_BUILTIN_UNPKBW,
6398
6399 /* TARGET_CIX */
6400 ALPHA_BUILTIN_CTTZ,
6401 ALPHA_BUILTIN_CTLZ,
6402 ALPHA_BUILTIN_CTPOP,
6403
6404 ALPHA_BUILTIN_max
6405 };
6406
6407 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6408 CODE_FOR_builtin_cmpbge,
6409 CODE_FOR_builtin_extbl,
6410 CODE_FOR_builtin_extwl,
6411 CODE_FOR_builtin_extll,
6412 CODE_FOR_builtin_extql,
6413 CODE_FOR_builtin_extwh,
6414 CODE_FOR_builtin_extlh,
6415 CODE_FOR_builtin_extqh,
6416 CODE_FOR_builtin_insbl,
6417 CODE_FOR_builtin_inswl,
6418 CODE_FOR_builtin_insll,
6419 CODE_FOR_builtin_insql,
6420 CODE_FOR_builtin_inswh,
6421 CODE_FOR_builtin_inslh,
6422 CODE_FOR_builtin_insqh,
6423 CODE_FOR_builtin_mskbl,
6424 CODE_FOR_builtin_mskwl,
6425 CODE_FOR_builtin_mskll,
6426 CODE_FOR_builtin_mskql,
6427 CODE_FOR_builtin_mskwh,
6428 CODE_FOR_builtin_msklh,
6429 CODE_FOR_builtin_mskqh,
6430 CODE_FOR_umuldi3_highpart,
6431 CODE_FOR_builtin_zap,
6432 CODE_FOR_builtin_zapnot,
6433 CODE_FOR_builtin_amask,
6434 CODE_FOR_builtin_implver,
6435 CODE_FOR_builtin_rpcc,
6436 CODE_FOR_load_tp,
6437 CODE_FOR_set_tp,
6438
6439 /* TARGET_MAX */
6440 CODE_FOR_builtin_minub8,
6441 CODE_FOR_builtin_minsb8,
6442 CODE_FOR_builtin_minuw4,
6443 CODE_FOR_builtin_minsw4,
6444 CODE_FOR_builtin_maxub8,
6445 CODE_FOR_builtin_maxsb8,
6446 CODE_FOR_builtin_maxuw4,
6447 CODE_FOR_builtin_maxsw4,
6448 CODE_FOR_builtin_perr,
6449 CODE_FOR_builtin_pklb,
6450 CODE_FOR_builtin_pkwb,
6451 CODE_FOR_builtin_unpkbl,
6452 CODE_FOR_builtin_unpkbw,
6453
6454 /* TARGET_CIX */
6455 CODE_FOR_ctzdi2,
6456 CODE_FOR_clzdi2,
6457 CODE_FOR_popcountdi2
6458 };
6459
6460 struct alpha_builtin_def
6461 {
6462 const char *name;
6463 enum alpha_builtin code;
6464 unsigned int target_mask;
6465 bool is_const;
6466 };
6467
6468 static struct alpha_builtin_def const zero_arg_builtins[] = {
6469 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6470 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6471 };
6472
6473 static struct alpha_builtin_def const one_arg_builtins[] = {
6474 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6475 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6476 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6477 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6478 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6479 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6480 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6481 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6482 };
6483
6484 static struct alpha_builtin_def const two_arg_builtins[] = {
6485 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6486 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6487 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6488 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6489 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6490 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6491 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6492 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6493 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6494 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6495 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6496 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6497 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6498 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6499 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6500 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6501 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6502 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6503 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6504 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6505 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6506 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6507 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6508 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6509 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6510 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6511 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6512 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6513 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6514 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6515 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6516 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6517 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6518 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6519 };
6520
6521 static GTY(()) tree alpha_v8qi_u;
6522 static GTY(()) tree alpha_v8qi_s;
6523 static GTY(()) tree alpha_v4hi_u;
6524 static GTY(()) tree alpha_v4hi_s;
6525
6526 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6527 functions pointed to by P, with function type FTYPE. */
6528
6529 static void
6530 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6531 tree ftype)
6532 {
6533 tree decl;
6534 size_t i;
6535
6536 for (i = 0; i < count; ++i, ++p)
6537 if ((target_flags & p->target_mask) == p->target_mask)
6538 {
6539 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6540 NULL, NULL);
6541 if (p->is_const)
6542 TREE_READONLY (decl) = 1;
6543 TREE_NOTHROW (decl) = 1;
6544 }
6545 }
6546
6547
6548 static void
6549 alpha_init_builtins (void)
6550 {
6551 tree dimode_integer_type_node;
6552 tree ftype, decl;
6553
6554 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6555
6556 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6557 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6558 ftype);
6559
6560 ftype = build_function_type_list (dimode_integer_type_node,
6561 dimode_integer_type_node, NULL_TREE);
6562 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6563 ftype);
6564
6565 ftype = build_function_type_list (dimode_integer_type_node,
6566 dimode_integer_type_node,
6567 dimode_integer_type_node, NULL_TREE);
6568 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6569 ftype);
6570
6571 ftype = build_function_type (ptr_type_node, void_list_node);
6572 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6573 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6574 NULL, NULL);
6575 TREE_NOTHROW (decl) = 1;
6576
6577 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6578 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6579 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6580 NULL, NULL);
6581 TREE_NOTHROW (decl) = 1;
6582
6583 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6584 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6585 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6586 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6587 }
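
/* Usage sketch: the declarations above expose intrinsics such as the ones
   below to code compiled for Alpha.  The fragment is guarded out because
   this file is host code, and the function and variable names are
   hypothetical; it only illustrates the two-operand DImode signatures
   registered above.  */

#if 0
long
example_use_of_alpha_builtins (long a, long b)
{
  long ge_mask = __builtin_alpha_cmpbge (a, b);    /* per-byte a >= b mask */
  long low4    = __builtin_alpha_zapnot (a, 0x0f); /* keep the low 4 bytes */
  long high    = __builtin_alpha_umulh (a, b);     /* high 64 bits of a*b */
  return ge_mask ^ low4 ^ high;
}
#endif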
6588
6589 /* Expand an expression EXP that calls a built-in function,
6590 with result going to TARGET if that's convenient
6591 (and in mode MODE if that's convenient).
6592 SUBTARGET may be used as the target for computing one of EXP's operands.
6593 IGNORE is nonzero if the value is to be ignored. */
6594
6595 static rtx
6596 alpha_expand_builtin (tree exp, rtx target,
6597 rtx subtarget ATTRIBUTE_UNUSED,
6598 enum machine_mode mode ATTRIBUTE_UNUSED,
6599 int ignore ATTRIBUTE_UNUSED)
6600 {
6601 #define MAX_ARGS 2
6602
6603 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6604 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6605 tree arg;
6606 call_expr_arg_iterator iter;
6607 enum insn_code icode;
6608 rtx op[MAX_ARGS], pat;
6609 int arity;
6610 bool nonvoid;
6611
6612 if (fcode >= ALPHA_BUILTIN_max)
6613 internal_error ("bad builtin fcode");
6614 icode = code_for_builtin[fcode];
6615 if (icode == 0)
6616 internal_error ("bad builtin fcode");
6617
6618 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6619
6620 arity = 0;
6621 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6622 {
6623 const struct insn_operand_data *insn_op;
6624
6625 if (arg == error_mark_node)
6626 return NULL_RTX;
6627 if (arity >= MAX_ARGS)
6628 return NULL_RTX;
6629
6630 insn_op = &insn_data[icode].operand[arity + nonvoid];
6631
6632 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6633
6634 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6635 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6636 arity++;
6637 }
6638
6639 if (nonvoid)
6640 {
6641 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6642 if (!target
6643 || GET_MODE (target) != tmode
6644 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6645 target = gen_reg_rtx (tmode);
6646 }
6647
6648 switch (arity)
6649 {
6650 case 0:
6651 pat = GEN_FCN (icode) (target);
6652 break;
6653 case 1:
6654 if (nonvoid)
6655 pat = GEN_FCN (icode) (target, op[0]);
6656 else
6657 pat = GEN_FCN (icode) (op[0]);
6658 break;
6659 case 2:
6660 pat = GEN_FCN (icode) (target, op[0], op[1]);
6661 break;
6662 default:
6663 gcc_unreachable ();
6664 }
6665 if (!pat)
6666 return NULL_RTX;
6667 emit_insn (pat);
6668
6669 if (nonvoid)
6670 return target;
6671 else
6672 return const0_rtx;
6673 }
6674
6675
6676 /* Several bits below assume HWI >= 64 bits. This should be enforced
6677 by config.gcc. */
6678 #if HOST_BITS_PER_WIDE_INT < 64
6679 # error "HOST_WIDE_INT too small"
6680 #endif
6681
6682 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6683 with an 8-bit output vector. OPINT contains the integer operands; bit N
6684 of OP_CONST is set if OPINT[N] is valid. */
6685
6686 static tree
6687 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6688 {
6689 if (op_const == 3)
6690 {
6691 int i, val;
6692 for (i = 0, val = 0; i < 8; ++i)
6693 {
6694 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6695 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6696 if (c0 >= c1)
6697 val |= 1 << i;
6698 }
6699 return build_int_cst (long_integer_type_node, val);
6700 }
6701 else if (op_const == 2 && opint[1] == 0)
6702 return build_int_cst (long_integer_type_node, 0xff);
6703 return NULL;
6704 }
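
/* Illustrative sketch, standalone: the CMPBGE operation in plain C, using
   unsigned long long to stand in for a 64-bit register.  Bit I of the
   result is set when byte I of A is unsigned-greater-or-equal to byte I of
   B, which is exactly the reduction the constant fold above performs.  The
   helper name is hypothetical.  */

static unsigned long long
example_cmpbge (unsigned long long a, unsigned long long b)
{
  unsigned long long result = 0;
  int i;

  for (i = 0; i < 8; ++i)
    if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
      result |= 1ULL << i;

  return result;
}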
6705
6706 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6707 specialized form of an AND operation. Other byte manipulation instructions
6708 are defined in terms of this instruction, so this is also used as a
6709 subroutine for other builtins.
6710
6711 OP contains the tree operands; OPINT contains the extracted integer values.
6712 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6713 OPINT is to be considered. */
6714
6715 static tree
6716 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6717 long op_const)
6718 {
6719 if (op_const & 2)
6720 {
6721 unsigned HOST_WIDE_INT mask = 0;
6722 int i;
6723
6724 for (i = 0; i < 8; ++i)
6725 if ((opint[1] >> i) & 1)
6726 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6727
6728 if (op_const & 1)
6729 return build_int_cst (long_integer_type_node, opint[0] & mask);
6730
6731 if (op)
6732 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6733 build_int_cst (long_integer_type_node, mask));
6734 }
6735 else if ((op_const & 1) && opint[0] == 0)
6736 return build_int_cst (long_integer_type_node, 0);
6737 return NULL;
6738 }
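
/* Illustrative sketch, standalone: ZAPNOT in plain C.  Each of the low
   eight bits of SEL selects whether the corresponding byte of VAL is kept;
   ZAP is the same operation with the selector complemented, which is how
   alpha_fold_builtin dispatches it below.  The helper name is
   hypothetical.  */

static unsigned long long
example_zapnot (unsigned long long val, unsigned long long sel)
{
  unsigned long long mask = 0;
  int i;

  for (i = 0; i < 8; ++i)
    if ((sel >> i) & 1)
      mask |= 0xffULL << (i * 8);

  return val & mask;
}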
6739
6740 /* Fold the builtins for the EXT family of instructions. */
6741
6742 static tree
6743 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6744 long op_const, unsigned HOST_WIDE_INT bytemask,
6745 bool is_high)
6746 {
6747 long zap_const = 2;
6748 tree *zap_op = NULL;
6749
6750 if (op_const & 2)
6751 {
6752 unsigned HOST_WIDE_INT loc;
6753
6754 loc = opint[1] & 7;
6755 if (BYTES_BIG_ENDIAN)
6756 loc ^= 7;
6757 loc *= 8;
6758
6759 if (loc != 0)
6760 {
6761 if (op_const & 1)
6762 {
6763 unsigned HOST_WIDE_INT temp = opint[0];
6764 if (is_high)
6765 temp <<= loc;
6766 else
6767 temp >>= loc;
6768 opint[0] = temp;
6769 zap_const = 3;
6770 }
6771 }
6772 else
6773 zap_op = op;
6774 }
6775
6776 opint[1] = bytemask;
6777 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6778 }
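
/* Illustrative sketch, standalone: the low-order EXT forms in plain C.
   EXTxL shifts VAL right by eight times the low three bits of POS and
   keeps the low NBYTES (1, 2, 4 or 8) bytes, which is the shift-then-ZAPNOT
   reduction used above; the high-order forms are not modeled.  The helper
   name is hypothetical.  */

static unsigned long long
example_extxl (unsigned long long val, unsigned long long pos, int nbytes)
{
  unsigned long long mask
    = nbytes >= 8 ? ~0ULL : (1ULL << (nbytes * 8)) - 1;

  return (val >> ((pos & 7) * 8)) & mask;
}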
6779
6780 /* Fold the builtins for the INS family of instructions. */
6781
6782 static tree
6783 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6784 long op_const, unsigned HOST_WIDE_INT bytemask,
6785 bool is_high)
6786 {
6787 if ((op_const & 1) && opint[0] == 0)
6788 return build_int_cst (long_integer_type_node, 0);
6789
6790 if (op_const & 2)
6791 {
6792 unsigned HOST_WIDE_INT temp, loc, byteloc;
6793 tree *zap_op = NULL;
6794
6795 loc = opint[1] & 7;
6796 if (BYTES_BIG_ENDIAN)
6797 loc ^= 7;
6798 bytemask <<= loc;
6799
6800 temp = opint[0];
6801 if (is_high)
6802 {
6803 byteloc = (64 - (loc * 8)) & 0x3f;
6804 if (byteloc == 0)
6805 zap_op = op;
6806 else
6807 temp >>= byteloc;
6808 bytemask >>= 8;
6809 }
6810 else
6811 {
6812 byteloc = loc * 8;
6813 if (byteloc == 0)
6814 zap_op = op;
6815 else
6816 temp <<= byteloc;
6817 }
6818
6819 opint[0] = temp;
6820 opint[1] = bytemask;
6821 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6822 }
6823
6824 return NULL;
6825 }
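
/* Illustrative sketch, standalone: the low-order INS forms in plain C.
   INSxL shifts the low NBYTES of VAL left by eight times the low three
   bits of POS and clears every byte outside the shifted byte field (bytes
   pushed past byte 7 are lost), i.e. the shift-then-ZAPNOT reduction
   performed above; the high-order forms are not modeled.  The helper name
   is hypothetical.  */

static unsigned long long
example_insxl (unsigned long long val, unsigned long long pos, int nbytes)
{
  unsigned long long bytemask = (1U << nbytes) - 1;
  unsigned long long mask = 0;
  int i;

  bytemask = (bytemask << (pos & 7)) & 0xff;
  for (i = 0; i < 8; ++i)
    if ((bytemask >> i) & 1)
      mask |= 0xffULL << (i * 8);

  return (val << ((pos & 7) * 8)) & mask;
}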
6826
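/* Fold the builtins for the MSK family of instructions. */
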
6827 static tree
6828 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6829 long op_const, unsigned HOST_WIDE_INT bytemask,
6830 bool is_high)
6831 {
6832 if (op_const & 2)
6833 {
6834 unsigned HOST_WIDE_INT loc;
6835
6836 loc = opint[1] & 7;
6837 if (BYTES_BIG_ENDIAN)
6838 loc ^= 7;
6839 bytemask <<= loc;
6840
6841 if (is_high)
6842 bytemask >>= 8;
6843
6844 opint[1] = bytemask ^ 0xff;
6845 }
6846
6847 return alpha_fold_builtin_zapnot (op, opint, op_const);
6848 }
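
/* Illustrative sketch, standalone: the low-order MSK forms in plain C.
   MSKxL clears a 1-, 2-, 4- or 8-byte field of VAL starting at byte
   position POS & 7 (truncated at byte 7), which is what the fold above
   expresses by inverting the shifted byte mask before handing it to the
   ZAPNOT subroutine; the high-order forms are not modeled.  The helper
   name is hypothetical.  */

static unsigned long long
example_mskxl (unsigned long long val, unsigned long long pos, int nbytes)
{
  unsigned long long bytemask = (1U << nbytes) - 1;
  unsigned long long mask = 0;
  int i;

  bytemask = ((bytemask << (pos & 7)) & 0xff) ^ 0xff;
  for (i = 0; i < 8; ++i)
    if ((bytemask >> i) & 1)
      mask |= 0xffULL << (i * 8);

  return val & mask;
}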
6849
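/* Fold the builtin for the UMULH instruction, which computes the upper
   64 bits of the 128-bit unsigned product of its operands. */
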
6850 static tree
6851 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6852 {
6853 switch (op_const)
6854 {
6855 case 3:
6856 {
6857 unsigned HOST_WIDE_INT l;
6858 HOST_WIDE_INT h;
6859
6860 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6861
6862 #if HOST_BITS_PER_WIDE_INT > 64
6863 # error fixme
6864 #endif
6865
6866 return build_int_cst (long_integer_type_node, h);
6867 }
6868
6869 case 1:
6870 opint[1] = opint[0];
6871 /* FALLTHRU */
6872 case 2:
6873 /* Note that (X*1) >> 64 == 0. */
6874 if (opint[1] == 0 || opint[1] == 1)
6875 return build_int_cst (long_integer_type_node, 0);
6876 break;
6877 }
6878 return NULL;
6879 }
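
/* Illustrative sketch, standalone: UMULH in plain C, assuming a compiler
   that provides the unsigned __int128 extension.  It returns the high 64
   bits of the full 128-bit unsigned product, the same value mul_double is
   used to obtain above.  The helper name is hypothetical.  */

static unsigned long long
example_umulh (unsigned long long a, unsigned long long b)
{
  return (unsigned long long) (((unsigned __int128) a * b) >> 64);
}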
6880
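/* Fold the MAX-extension vector min/max builtins by performing the MIN or
   MAX on vector-typed views of the operands and converting the result back
   to an integer. */
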
6881 static tree
6882 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6883 {
6884 tree op0 = fold_convert (vtype, op[0]);
6885 tree op1 = fold_convert (vtype, op[1]);
6886 tree val = fold_build2 (code, vtype, op0, op1);
6887 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6888 }
6889
6890 static tree
6891 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6892 {
6893 unsigned HOST_WIDE_INT temp = 0;
6894 int i;
6895
6896 if (op_const != 3)
6897 return NULL;
6898
6899 for (i = 0; i < 8; ++i)
6900 {
6901 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6902 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6903 if (a >= b)
6904 temp += a - b;
6905 else
6906 temp += b - a;
6907 }
6908
6909 return build_int_cst (long_integer_type_node, temp);
6910 }
6911
6912 static tree
6913 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6914 {
6915 unsigned HOST_WIDE_INT temp;
6916
6917 if (op_const == 0)
6918 return NULL;
6919
6920 temp = opint[0] & 0xff;
6921 temp |= (opint[0] >> 24) & 0xff00;
6922
6923 return build_int_cst (long_integer_type_node, temp);
6924 }
6925
6926 static tree
6927 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6928 {
6929 unsigned HOST_WIDE_INT temp;
6930
6931 if (op_const == 0)
6932 return NULL;
6933
6934 temp = opint[0] & 0xff;
6935 temp |= (opint[0] >> 8) & 0xff00;
6936 temp |= (opint[0] >> 16) & 0xff0000;
6937 temp |= (opint[0] >> 24) & 0xff000000;
6938
6939 return build_int_cst (long_integer_type_node, temp);
6940 }
6941
6942 static tree
6943 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6944 {
6945 unsigned HOST_WIDE_INT temp;
6946
6947 if (op_const == 0)
6948 return NULL;
6949
6950 temp = opint[0] & 0xff;
6951 temp |= (opint[0] & 0xff00) << 24;
6952
6953 return build_int_cst (long_integer_type_node, temp);
6954 }
6955
6956 static tree
6957 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6958 {
6959 unsigned HOST_WIDE_INT temp;
6960
6961 if (op_const == 0)
6962 return NULL;
6963
6964 temp = opint[0] & 0xff;
6965 temp |= (opint[0] & 0x0000ff00) << 8;
6966 temp |= (opint[0] & 0x00ff0000) << 16;
6967 temp |= (opint[0] & 0xff000000) << 24;
6968
6969 return build_int_cst (long_integer_type_node, temp);
6970 }
6971
6972 static tree
6973 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6974 {
6975 unsigned HOST_WIDE_INT temp;
6976
6977 if (op_const == 0)
6978 return NULL;
6979
6980 if (opint[0] == 0)
6981 temp = 64;
6982 else
6983 temp = exact_log2 (opint[0] & -opint[0]);
6984
6985 return build_int_cst (long_integer_type_node, temp);
6986 }
6987
6988 static tree
6989 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6990 {
6991 unsigned HOST_WIDE_INT temp;
6992
6993 if (op_const == 0)
6994 return NULL;
6995
6996 if (opint[0] == 0)
6997 temp = 64;
6998 else
6999 temp = 64 - floor_log2 (opint[0]) - 1;
7000
7001 return build_int_cst (long_integer_type_node, temp);
7002 }
7003
7004 static tree
7005 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7006 {
7007 unsigned HOST_WIDE_INT temp, op;
7008
7009 if (op_const == 0)
7010 return NULL;
7011
7012 op = opint[0];
7013 temp = 0;
7014 while (op)
7015 temp++, op &= op - 1;
7016
7017 return build_int_cst (long_integer_type_node, temp);
7018 }
7019
7020 /* Fold one of our builtin functions. */
7021
7022 static tree
7023 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
7024 {
7025 tree op[MAX_ARGS], t;
7026 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7027 long op_const = 0, arity = 0;
7028
7029 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
7030 {
7031 tree arg = TREE_VALUE (t);
7032 if (arg == error_mark_node)
7033 return NULL;
7034 if (arity >= MAX_ARGS)
7035 return NULL;
7036
7037 op[arity] = arg;
7038 opint[arity] = 0;
7039 if (TREE_CODE (arg) == INTEGER_CST)
7040 {
7041 op_const |= 1L << arity;
7042 opint[arity] = int_cst_value (arg);
7043 }
7044 }
7045
7046 switch (DECL_FUNCTION_CODE (fndecl))
7047 {
7048 case ALPHA_BUILTIN_CMPBGE:
7049 return alpha_fold_builtin_cmpbge (opint, op_const);
7050
7051 case ALPHA_BUILTIN_EXTBL:
7052 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7053 case ALPHA_BUILTIN_EXTWL:
7054 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7055 case ALPHA_BUILTIN_EXTLL:
7056 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7057 case ALPHA_BUILTIN_EXTQL:
7058 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7059 case ALPHA_BUILTIN_EXTWH:
7060 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7061 case ALPHA_BUILTIN_EXTLH:
7062 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7063 case ALPHA_BUILTIN_EXTQH:
7064 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7065
7066 case ALPHA_BUILTIN_INSBL:
7067 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7068 case ALPHA_BUILTIN_INSWL:
7069 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7070 case ALPHA_BUILTIN_INSLL:
7071 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7072 case ALPHA_BUILTIN_INSQL:
7073 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7074 case ALPHA_BUILTIN_INSWH:
7075 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7076 case ALPHA_BUILTIN_INSLH:
7077 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7078 case ALPHA_BUILTIN_INSQH:
7079 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7080
7081 case ALPHA_BUILTIN_MSKBL:
7082 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7083 case ALPHA_BUILTIN_MSKWL:
7084 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7085 case ALPHA_BUILTIN_MSKLL:
7086 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7087 case ALPHA_BUILTIN_MSKQL:
7088 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7089 case ALPHA_BUILTIN_MSKWH:
7090 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7091 case ALPHA_BUILTIN_MSKLH:
7092 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7093 case ALPHA_BUILTIN_MSKQH:
7094 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7095
7096 case ALPHA_BUILTIN_UMULH:
7097 return alpha_fold_builtin_umulh (opint, op_const);
7098
7099 case ALPHA_BUILTIN_ZAP:
7100 opint[1] ^= 0xff;
7101 /* FALLTHRU */
7102 case ALPHA_BUILTIN_ZAPNOT:
7103 return alpha_fold_builtin_zapnot (op, opint, op_const);
7104
7105 case ALPHA_BUILTIN_MINUB8:
7106 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7107 case ALPHA_BUILTIN_MINSB8:
7108 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7109 case ALPHA_BUILTIN_MINUW4:
7110 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7111 case ALPHA_BUILTIN_MINSW4:
7112 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7113 case ALPHA_BUILTIN_MAXUB8:
7114 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7115 case ALPHA_BUILTIN_MAXSB8:
7116 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7117 case ALPHA_BUILTIN_MAXUW4:
7118 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7119 case ALPHA_BUILTIN_MAXSW4:
7120 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7121
7122 case ALPHA_BUILTIN_PERR:
7123 return alpha_fold_builtin_perr (opint, op_const);
7124 case ALPHA_BUILTIN_PKLB:
7125 return alpha_fold_builtin_pklb (opint, op_const);
7126 case ALPHA_BUILTIN_PKWB:
7127 return alpha_fold_builtin_pkwb (opint, op_const);
7128 case ALPHA_BUILTIN_UNPKBL:
7129 return alpha_fold_builtin_unpkbl (opint, op_const);
7130 case ALPHA_BUILTIN_UNPKBW:
7131 return alpha_fold_builtin_unpkbw (opint, op_const);
7132
7133 case ALPHA_BUILTIN_CTTZ:
7134 return alpha_fold_builtin_cttz (opint, op_const);
7135 case ALPHA_BUILTIN_CTLZ:
7136 return alpha_fold_builtin_ctlz (opint, op_const);
7137 case ALPHA_BUILTIN_CTPOP:
7138 return alpha_fold_builtin_ctpop (opint, op_const);
7139
7140 case ALPHA_BUILTIN_AMASK:
7141 case ALPHA_BUILTIN_IMPLVER:
7142 case ALPHA_BUILTIN_RPCC:
7143 case ALPHA_BUILTIN_THREAD_POINTER:
7144 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7145 /* None of these are foldable at compile-time. */
7146 default:
7147 return NULL;
7148 }
7149 }
7150 \f
7151 /* This page contains routines that are used to determine what the function
7152 prologue and epilogue code will do and write them out. */
7153
7154 /* Compute the size of the save area in the stack. */
7155
7156 /* These variables are used for communication between the following functions.
7157 They indicate various things about the current function being compiled
7158 that are used to tell what kind of prologue, epilogue and procedure
7159 descriptor to generate. */
7160
7161 /* The kind of procedure (null, register frame, or stack frame) we need. */
7162 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7163 static enum alpha_procedure_types alpha_procedure_type;
7164
7165 /* Register number (either FP or SP) that is used to unwind the frame. */
7166 static int vms_unwind_regno;
7167
7168 /* Register number used to save FP. We need not have one for RA since
7169 we don't modify it for register procedures. This is only defined
7170 for register frame procedures. */
7171 static int vms_save_fp_regno;
7172
7173 /* Register number used to reference objects off our PV. */
7174 static int vms_base_regno;
7175
7176 /* Compute register masks for saved registers. */
7177
7178 static void
7179 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7180 {
7181 unsigned long imask = 0;
7182 unsigned long fmask = 0;
7183 unsigned int i;
7184
7185 /* When outputting a thunk, we don't have valid register life info,
7186 but assemble_start_function wants to output .frame and .mask
7187 directives. */
7188 if (cfun->is_thunk)
7189 {
7190 *imaskP = 0;
7191 *fmaskP = 0;
7192 return;
7193 }
7194
7195 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7196 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7197
7198 /* One for every register we have to save. */
7199 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7200 if (! fixed_regs[i] && ! call_used_regs[i]
7201 && df_regs_ever_live_p (i) && i != REG_RA
7202 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7203 {
7204 if (i < 32)
7205 imask |= (1UL << i);
7206 else
7207 fmask |= (1UL << (i - 32));
7208 }
7209
7210 /* We need to restore these for the handler. */
7211 if (crtl->calls_eh_return)
7212 {
7213 for (i = 0; ; ++i)
7214 {
7215 unsigned regno = EH_RETURN_DATA_REGNO (i);
7216 if (regno == INVALID_REGNUM)
7217 break;
7218 imask |= 1UL << regno;
7219 }
7220 }
7221
7222 /* If any register spilled, then spill the return address also. */
7223 /* ??? This is required by the Digital stack unwind specification
7224 and isn't needed if we're doing Dwarf2 unwinding. */
7225 if (imask || fmask || alpha_ra_ever_killed ())
7226 imask |= (1UL << REG_RA);
7227
7228 *imaskP = imask;
7229 *fmaskP = fmask;
7230 }
7231
7232 int
7233 alpha_sa_size (void)
7234 {
7235 unsigned long mask[2];
7236 int sa_size = 0;
7237 int i, j;
7238
7239 alpha_sa_mask (&mask[0], &mask[1]);
7240
7241 if (TARGET_ABI_UNICOSMK)
7242 {
7243 if (mask[0] || mask[1])
7244 sa_size = 14;
7245 }
7246 else
7247 {
7248 for (j = 0; j < 2; ++j)
7249 for (i = 0; i < 32; ++i)
7250 if ((mask[j] >> i) & 1)
7251 sa_size++;
7252 }
7253
7254 if (TARGET_ABI_UNICOSMK)
7255 {
7256 /* We might not need to generate a frame if we don't make any calls
7257 (including calls to __T3E_MISMATCH if this is a vararg function),
7258 don't have any local variables which require stack slots, don't
7259 use alloca and have not determined that we need a frame for other
7260 reasons. */
7261
7262 alpha_procedure_type
7263 = (sa_size || get_frame_size() != 0
7264 || crtl->outgoing_args_size
7265 || cfun->stdarg || cfun->calls_alloca
7266 || frame_pointer_needed)
7267 ? PT_STACK : PT_REGISTER;
7268
7269 /* Always reserve space for saving callee-saved registers if we
7270 need a frame as required by the calling convention. */
7271 if (alpha_procedure_type == PT_STACK)
7272 sa_size = 14;
7273 }
7274 else if (TARGET_ABI_OPEN_VMS)
7275 {
7276 /* Start by assuming we can use a register procedure if we don't
7277 make any calls (REG_RA not used) and don't need to save any
7278 registers, and a stack procedure if we do. */
7279 if ((mask[0] >> REG_RA) & 1)
7280 alpha_procedure_type = PT_STACK;
7281 else if (get_frame_size() != 0)
7282 alpha_procedure_type = PT_REGISTER;
7283 else
7284 alpha_procedure_type = PT_NULL;
7285
7286 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7287 made the final decision on stack procedure vs register procedure. */
7288 if (alpha_procedure_type == PT_STACK)
7289 sa_size -= 2;
7290
7291 /* Decide whether to refer to objects off our PV via FP or PV.
7292 If we need FP for something else or if we receive a nonlocal
7293 goto (which expects PV to contain the value), we must use PV.
7294 Otherwise, start by assuming we can use FP. */
7295
7296 vms_base_regno
7297 = (frame_pointer_needed
7298 || cfun->has_nonlocal_label
7299 || alpha_procedure_type == PT_STACK
7300 || crtl->outgoing_args_size)
7301 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7302
7303 /* If we want to copy PV into FP, we need to find some register
7304 in which to save FP. */
7305
7306 vms_save_fp_regno = -1;
7307 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7308 for (i = 0; i < 32; i++)
7309 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7310 vms_save_fp_regno = i;
7311
7312 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7313 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7314 else if (alpha_procedure_type == PT_NULL)
7315 vms_base_regno = REG_PV;
7316
7317 /* Stack unwinding should be done via FP unless we use it for PV. */
7318 vms_unwind_regno = (vms_base_regno == REG_PV
7319 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7320
7321 /* If this is a stack procedure, allow space for saving FP and RA. */
7322 if (alpha_procedure_type == PT_STACK)
7323 sa_size += 2;
7324 }
7325 else
7326 {
7327 /* Our size must be even (multiple of 16 bytes). */
7328 if (sa_size & 1)
7329 sa_size++;
7330 }
7331
7332 return sa_size * 8;
7333 }
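
/* Illustrative sketch, standalone: the OSF/1 arm of the computation above.
   The save area needs one 8-byte slot per bit set in either mask, padded
   to an even number of slots so the total stays a multiple of 16 bytes;
   the VMS and Unicos/Mk adjustments are not modeled.  The helper name is
   hypothetical.  */

static int
example_osf_sa_size_bytes (unsigned long imask, unsigned long fmask)
{
  int slots = 0;
  int i;

  for (i = 0; i < 32; ++i)
    {
      if ((imask >> i) & 1)
        slots++;
      if ((fmask >> i) & 1)
        slots++;
    }

  if (slots & 1)
    slots++;

  return slots * 8;
}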
7334
7335 /* Define the offset between two registers, one to be eliminated,
7336 and the other its replacement, at the start of a routine. */
7337
7338 HOST_WIDE_INT
7339 alpha_initial_elimination_offset (unsigned int from,
7340 unsigned int to ATTRIBUTE_UNUSED)
7341 {
7342 HOST_WIDE_INT ret;
7343
7344 ret = alpha_sa_size ();
7345 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7346
7347 switch (from)
7348 {
7349 case FRAME_POINTER_REGNUM:
7350 break;
7351
7352 case ARG_POINTER_REGNUM:
7353 ret += (ALPHA_ROUND (get_frame_size ()
7354 + crtl->args.pretend_args_size)
7355 - crtl->args.pretend_args_size);
7356 break;
7357
7358 default:
7359 gcc_unreachable ();
7360 }
7361
7362 return ret;
7363 }
7364
7365 int
7366 alpha_pv_save_size (void)
7367 {
7368 alpha_sa_size ();
7369 return alpha_procedure_type == PT_STACK ? 8 : 0;
7370 }
7371
7372 int
7373 alpha_using_fp (void)
7374 {
7375 alpha_sa_size ();
7376 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7377 }
7378
7379 #if TARGET_ABI_OPEN_VMS
7380
7381 static const struct attribute_spec vms_attribute_table[] =
7382 {
7383 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7384 { "overlaid", 0, 0, true, false, false, NULL },
7385 { "global", 0, 0, true, false, false, NULL },
7386 { "initialize", 0, 0, true, false, false, NULL },
7387 { NULL, 0, 0, false, false, false, NULL }
7388 };
7389
7390 #endif
7391
7392 static int
7393 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7394 {
7395 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7396 }
7397
7398 int
7399 alpha_find_lo_sum_using_gp (rtx insn)
7400 {
7401 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7402 }
7403
7404 static int
7405 alpha_does_function_need_gp (void)
7406 {
7407 rtx insn;
7408
7409 /* The GP being variable is an OSF ABI thing. */
7410 if (! TARGET_ABI_OSF)
7411 return 0;
7412
7413 /* We need the gp to load the address of __mcount. */
7414 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7415 return 1;
7416
7417 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7418 if (cfun->is_thunk)
7419 return 1;
7420
7421 /* The nonlocal receiver pattern assumes that the gp is valid for
7422 the nested function. Reasonable because it's almost always set
7423 correctly already. For the cases where that's wrong, make sure
7424 the nested function loads its gp on entry. */
7425 if (crtl->has_nonlocal_goto)
7426 return 1;
7427
7428 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7429 Even if we are a static function, we still need to do this in case
7430 our address is taken and passed to something like qsort. */
7431
7432 push_topmost_sequence ();
7433 insn = get_insns ();
7434 pop_topmost_sequence ();
7435
7436 for (; insn; insn = NEXT_INSN (insn))
7437 if (INSN_P (insn)
7438 && ! JUMP_TABLE_DATA_P (insn)
7439 && GET_CODE (PATTERN (insn)) != USE
7440 && GET_CODE (PATTERN (insn)) != CLOBBER
7441 && get_attr_usegp (insn))
7442 return 1;
7443
7444 return 0;
7445 }
7446
7447 \f
7448 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7449 sequences. */
7450
7451 static rtx
7452 set_frame_related_p (void)
7453 {
7454 rtx seq = get_insns ();
7455 rtx insn;
7456
7457 end_sequence ();
7458
7459 if (!seq)
7460 return NULL_RTX;
7461
7462 if (INSN_P (seq))
7463 {
7464 insn = seq;
7465 while (insn != NULL_RTX)
7466 {
7467 RTX_FRAME_RELATED_P (insn) = 1;
7468 insn = NEXT_INSN (insn);
7469 }
7470 seq = emit_insn (seq);
7471 }
7472 else
7473 {
7474 seq = emit_insn (seq);
7475 RTX_FRAME_RELATED_P (seq) = 1;
7476 }
7477 return seq;
7478 }
7479
7480 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7481
7482 /* Generates a store with the proper unwind info attached. VALUE is
7483 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7484 contains SP+FRAME_BIAS, and that is the unwind info that should be
7485 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7486 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7487
7488 static void
7489 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7490 HOST_WIDE_INT base_ofs, rtx frame_reg)
7491 {
7492 rtx addr, mem, insn;
7493
7494 addr = plus_constant (base_reg, base_ofs);
7495 mem = gen_rtx_MEM (DImode, addr);
7496 set_mem_alias_set (mem, alpha_sr_alias_set);
7497
7498 insn = emit_move_insn (mem, value);
7499 RTX_FRAME_RELATED_P (insn) = 1;
7500
7501 if (frame_bias || value != frame_reg)
7502 {
7503 if (frame_bias)
7504 {
7505 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7506 mem = gen_rtx_MEM (DImode, addr);
7507 }
7508
7509 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7510 gen_rtx_SET (VOIDmode, mem, frame_reg));
7511 }
7512 }
7513
7514 static void
7515 emit_frame_store (unsigned int regno, rtx base_reg,
7516 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7517 {
7518 rtx reg = gen_rtx_REG (DImode, regno);
7519 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7520 }
7521
7522 /* Write function prologue. */
7523
7524 /* On VMS we have two kinds of functions:
7525
7526 - stack frame (PROC_STACK)
7527 these are 'normal' functions with local variables which
7528 call other functions
7529 - register frame (PROC_REGISTER)
7530 keeps all data in registers, needs no stack
7531
7532 We must pass this to the assembler so it can generate the
7533 proper pdsc (procedure descriptor).
7534 This is done with the '.pdesc' command.
7535
7536 On non-VMS targets, we don't really differentiate between the two, as
7537 we can simply allocate stack without saving registers. */
7538
7539 void
7540 alpha_expand_prologue (void)
7541 {
7542 /* Registers to save. */
7543 unsigned long imask = 0;
7544 unsigned long fmask = 0;
7545 /* Stack space needed for pushing registers clobbered by us. */
7546 HOST_WIDE_INT sa_size;
7547 /* Complete stack size needed. */
7548 HOST_WIDE_INT frame_size;
7549 /* Offset from base reg to register save area. */
7550 HOST_WIDE_INT reg_offset;
7551 rtx sa_reg;
7552 int i;
7553
7554 sa_size = alpha_sa_size ();
7555
7556 frame_size = get_frame_size ();
7557 if (TARGET_ABI_OPEN_VMS)
7558 frame_size = ALPHA_ROUND (sa_size
7559 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7560 + frame_size
7561 + crtl->args.pretend_args_size);
7562 else if (TARGET_ABI_UNICOSMK)
7563 /* We have to allocate space for the DSIB if we generate a frame. */
7564 frame_size = ALPHA_ROUND (sa_size
7565 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7566 + ALPHA_ROUND (frame_size
7567 + crtl->outgoing_args_size);
7568 else
7569 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7570 + sa_size
7571 + ALPHA_ROUND (frame_size
7572 + crtl->args.pretend_args_size));
7573
7574 if (TARGET_ABI_OPEN_VMS)
7575 reg_offset = 8;
7576 else
7577 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7578
7579 alpha_sa_mask (&imask, &fmask);
7580
7581 /* Emit an insn to reload GP, if needed. */
7582 if (TARGET_ABI_OSF)
7583 {
7584 alpha_function_needs_gp = alpha_does_function_need_gp ();
7585 if (alpha_function_needs_gp)
7586 emit_insn (gen_prologue_ldgp ());
7587 }
7588
7589 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7590 the call to mcount ourselves, rather than having the linker do it
7591 magically in response to -pg. Since _mcount has special linkage,
7592 don't represent the call as a call. */
7593 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7594 emit_insn (gen_prologue_mcount ());
7595
7596 if (TARGET_ABI_UNICOSMK)
7597 unicosmk_gen_dsib (&imask);
7598
7599 /* Adjust the stack by the frame size. If the frame size is > 4096
7600 bytes, we need to be sure we probe somewhere in the first and last
7601 4096 bytes (we can probably get away without the latter test) and
7602 every 8192 bytes in between. If the frame size is > 32768, we
7603 do this in a loop. Otherwise, we generate the explicit probe
7604 instructions.
7605
7606 Note that we are only allowed to adjust sp once in the prologue. */
7607
7608 if (frame_size <= 32768)
7609 {
7610 if (frame_size > 4096)
7611 {
7612 int probed;
7613
7614 for (probed = 4096; probed < frame_size; probed += 8192)
7615 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7616 ? -probed + 64
7617 : -probed)));
7618
7619 /* We only have to do this probe if we aren't saving registers. */
7620 if (sa_size == 0 && frame_size > probed - 4096)
7621 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7622 }
7623
7624 if (frame_size != 0)
7625 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7626 GEN_INT (TARGET_ABI_UNICOSMK
7627 ? -frame_size + 64
7628 : -frame_size))));
7629 }
7630 else
7631 {
7632 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7633 number of 8192 byte blocks to probe. We then probe each block
7634 in the loop and then set SP to the proper location. If the
7635 amount remaining is > 4096, we have to do one more probe if we
7636 are not saving any registers. */
7637
7638 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7639 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7640 rtx ptr = gen_rtx_REG (DImode, 22);
7641 rtx count = gen_rtx_REG (DImode, 23);
7642 rtx seq;
7643
7644 emit_move_insn (count, GEN_INT (blocks));
7645 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7646 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7647
7648 /* Because of the difficulty in emitting a new basic block this
7649 late in the compilation, generate the loop as a single insn. */
7650 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7651
7652 if (leftover > 4096 && sa_size == 0)
7653 {
7654 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7655 MEM_VOLATILE_P (last) = 1;
7656 emit_move_insn (last, const0_rtx);
7657 }
7658
7659 if (TARGET_ABI_WINDOWS_NT)
7660 {
7661 /* For NT stack unwind (done by 'reverse execution'), it's
7662 not OK to take the result of a loop, even though the value
7663 is already in ptr, so we reload it via a single operation
7664 and subtract it from sp.
7665
7666 Yes, that's correct -- we have to reload the whole constant
7667 into a temporary via ldah+lda then subtract from sp. */
7668
7669 HOST_WIDE_INT lo, hi;
7670 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7671 hi = frame_size - lo;
7672
7673 emit_move_insn (ptr, GEN_INT (hi));
7674 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7675 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7676 ptr));
7677 }
7678 else
7679 {
7680 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7681 GEN_INT (-leftover)));
7682 }
7683
7684 /* This alternative is special, because the DWARF code cannot
7685 possibly intuit through the loop above. So we invent this
7686 note for it to look at instead. */
7687 RTX_FRAME_RELATED_P (seq) = 1;
7688 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7689 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7690 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7691 GEN_INT (TARGET_ABI_UNICOSMK
7692 ? -frame_size + 64
7693 : -frame_size))));
7694 }
7695
7696 if (!TARGET_ABI_UNICOSMK)
7697 {
7698 HOST_WIDE_INT sa_bias = 0;
7699
7700 /* Cope with very large offsets to the register save area. */
7701 sa_reg = stack_pointer_rtx;
7702 if (reg_offset + sa_size > 0x8000)
7703 {
7704 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7705 rtx sa_bias_rtx;
7706
7707 if (low + sa_size <= 0x8000)
7708 sa_bias = reg_offset - low, reg_offset = low;
7709 else
7710 sa_bias = reg_offset, reg_offset = 0;
7711
7712 sa_reg = gen_rtx_REG (DImode, 24);
7713 sa_bias_rtx = GEN_INT (sa_bias);
7714
7715 if (add_operand (sa_bias_rtx, DImode))
7716 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7717 else
7718 {
7719 emit_move_insn (sa_reg, sa_bias_rtx);
7720 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7721 }
7722 }
7723
7724 /* Save regs in stack order, beginning with the VMS PV. */
7725 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7726 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7727
7728 /* Save register RA next. */
7729 if (imask & (1UL << REG_RA))
7730 {
7731 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7732 imask &= ~(1UL << REG_RA);
7733 reg_offset += 8;
7734 }
7735
7736 /* Now save any other registers required to be saved. */
7737 for (i = 0; i < 31; i++)
7738 if (imask & (1UL << i))
7739 {
7740 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7741 reg_offset += 8;
7742 }
7743
7744 for (i = 0; i < 31; i++)
7745 if (fmask & (1UL << i))
7746 {
7747 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7748 reg_offset += 8;
7749 }
7750 }
7751 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7752 {
7753 /* The standard frame on the T3E includes space for saving registers.
7754 We just have to use it. We don't have to save the return address and
7755 the old frame pointer here - they are saved in the DSIB. */
7756
7757 reg_offset = -56;
7758 for (i = 9; i < 15; i++)
7759 if (imask & (1UL << i))
7760 {
7761 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7762 reg_offset -= 8;
7763 }
7764 for (i = 2; i < 10; i++)
7765 if (fmask & (1UL << i))
7766 {
7767 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7768 reg_offset -= 8;
7769 }
7770 }
7771
7772 if (TARGET_ABI_OPEN_VMS)
7773 {
7774 /* Register frame procedures save the fp. */
7775 if (alpha_procedure_type == PT_REGISTER)
7776 {
7777 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7778 hard_frame_pointer_rtx);
7779 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7780 RTX_FRAME_RELATED_P (insn) = 1;
7781 }
7782
7783 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7784 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7785 gen_rtx_REG (DImode, REG_PV)));
7786
7787 if (alpha_procedure_type != PT_NULL
7788 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7789 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7790
7791 /* If we have to allocate space for outgoing args, do it now. */
7792 if (crtl->outgoing_args_size != 0)
7793 {
7794 rtx seq
7795 = emit_move_insn (stack_pointer_rtx,
7796 plus_constant
7797 (hard_frame_pointer_rtx,
7798 - (ALPHA_ROUND
7799 (crtl->outgoing_args_size))));
7800
7801 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7802 if ! frame_pointer_needed. Setting the bit will change the CFA
7803 computation rule to use sp again, which would be wrong if we had
7804 frame_pointer_needed, as this means sp might move unpredictably
7805 later on.
7806
7807 Also, note that
7808 frame_pointer_needed
7809 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7810 and
7811 crtl->outgoing_args_size != 0
7812 => alpha_procedure_type != PT_NULL,
7813
7814 so when we are not setting the bit here, we are guaranteed to
7815 have emitted an FRP frame pointer update just before. */
7816 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7817 }
7818 }
7819 else if (!TARGET_ABI_UNICOSMK)
7820 {
7821 /* If we need a frame pointer, set it from the stack pointer. */
7822 if (frame_pointer_needed)
7823 {
7824 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7825 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7826 else
7827 /* This must always be the last instruction in the
7828 prologue, thus we emit a special move + clobber. */
7829 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7830 stack_pointer_rtx, sa_reg)));
7831 }
7832 }
7833
7834 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7835 the prologue, for exception handling reasons, we cannot do this for
7836 any insn that might fault. We could prevent this for mems with a
7837 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7838 have to prevent all such scheduling with a blockage.
7839
7840 Linux, on the other hand, never bothered to implement OSF/1's
7841 exception handling, and so doesn't care about such things. Anyone
7842 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7843
7844 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7845 emit_insn (gen_blockage ());
7846 }
7847
7848 /* Count the number of .file directives, so that .loc is up to date. */
7849 int num_source_filenames = 0;
7850
7851 /* Output the textual info surrounding the prologue. */
7852
7853 void
7854 alpha_start_function (FILE *file, const char *fnname,
7855 tree decl ATTRIBUTE_UNUSED)
7856 {
7857 unsigned long imask = 0;
7858 unsigned long fmask = 0;
7859 /* Stack space needed for pushing registers clobbered by us. */
7860 HOST_WIDE_INT sa_size;
7861 /* Complete stack size needed. */
7862 unsigned HOST_WIDE_INT frame_size;
7863 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7864 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7865 ? 524288
7866 : 1UL << 31;
7867 /* Offset from base reg to register save area. */
7868 HOST_WIDE_INT reg_offset;
7869 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7870 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7871 int i;
7872
7873 /* Don't emit an extern directive for functions defined in the same file. */
7874 if (TARGET_ABI_UNICOSMK)
7875 {
7876 tree name_tree;
7877 name_tree = get_identifier (fnname);
7878 TREE_ASM_WRITTEN (name_tree) = 1;
7879 }
7880
7881 #if TARGET_ABI_OPEN_VMS
7882 if (vms_debug_main
7883 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7884 {
7885 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7886 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7887 switch_to_section (text_section);
7888 vms_debug_main = NULL;
7889 }
7890 #endif
7891
7892 alpha_fnname = fnname;
7893 sa_size = alpha_sa_size ();
7894
7895 frame_size = get_frame_size ();
7896 if (TARGET_ABI_OPEN_VMS)
7897 frame_size = ALPHA_ROUND (sa_size
7898 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7899 + frame_size
7900 + crtl->args.pretend_args_size);
7901 else if (TARGET_ABI_UNICOSMK)
7902 frame_size = ALPHA_ROUND (sa_size
7903 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7904 + ALPHA_ROUND (frame_size
7905 + crtl->outgoing_args_size);
7906 else
7907 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7908 + sa_size
7909 + ALPHA_ROUND (frame_size
7910 + crtl->args.pretend_args_size));
7911
7912 if (TARGET_ABI_OPEN_VMS)
7913 reg_offset = 8;
7914 else
7915 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7916
7917 alpha_sa_mask (&imask, &fmask);
7918
7919 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7920 We have to do that before the .ent directive as we cannot switch
7921 files within procedures with native ecoff because line numbers are
7922 linked to procedure descriptors.
7923 Outputting the lineno helps debugging of one line functions as they
7924 would otherwise get no line number at all. Please note that we would
7925 like to put out last_linenum from final.c, but it is not accessible. */
7926
7927 if (write_symbols == SDB_DEBUG)
7928 {
7929 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7930 ASM_OUTPUT_SOURCE_FILENAME (file,
7931 DECL_SOURCE_FILE (current_function_decl));
7932 #endif
7933 #ifdef SDB_OUTPUT_SOURCE_LINE
7934 if (debug_info_level != DINFO_LEVEL_TERSE)
7935 SDB_OUTPUT_SOURCE_LINE (file,
7936 DECL_SOURCE_LINE (current_function_decl));
7937 #endif
7938 }
7939
7940 /* Issue function start and label. */
7941 if (TARGET_ABI_OPEN_VMS
7942 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7943 {
7944 fputs ("\t.ent ", file);
7945 assemble_name (file, fnname);
7946 putc ('\n', file);
7947
7948 /* If the function needs GP, we'll write the "..ng" label there.
7949 Otherwise, do it here. */
7950 if (TARGET_ABI_OSF
7951 && ! alpha_function_needs_gp
7952 && ! cfun->is_thunk)
7953 {
7954 putc ('$', file);
7955 assemble_name (file, fnname);
7956 fputs ("..ng:\n", file);
7957 }
7958 }
7959 /* Nested functions on VMS that are potentially called via trampoline
7960 get a special transfer entry point that loads the called function's
7961 procedure descriptor and static chain. */
7962 if (TARGET_ABI_OPEN_VMS
7963 && !TREE_PUBLIC (decl)
7964 && DECL_CONTEXT (decl)
7965 && !TYPE_P (DECL_CONTEXT (decl)))
7966 {
7967 strcpy (tramp_label, fnname);
7968 strcat (tramp_label, "..tr");
7969 ASM_OUTPUT_LABEL (file, tramp_label);
7970 fprintf (file, "\tldq $1,24($27)\n");
7971 fprintf (file, "\tldq $27,16($27)\n");
7972 }
7973
7974 strcpy (entry_label, fnname);
7975 if (TARGET_ABI_OPEN_VMS)
7976 strcat (entry_label, "..en");
7977
7978 /* For public functions, the label must be globalized by appending an
7979 additional colon. */
7980 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7981 strcat (entry_label, ":");
7982
7983 ASM_OUTPUT_LABEL (file, entry_label);
7984 inside_function = TRUE;
7985
7986 if (TARGET_ABI_OPEN_VMS)
7987 fprintf (file, "\t.base $%d\n", vms_base_regno);
7988
7989 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7990 && !flag_inhibit_size_directive)
7991 {
7992 /* Set flags in procedure descriptor to request IEEE-conformant
7993 math-library routines. The value we set it to is PDSC_EXC_IEEE
7994 (/usr/include/pdsc.h). */
7995 fputs ("\t.eflag 48\n", file);
7996 }
7997
7998 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7999 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8000 alpha_arg_offset = -frame_size + 48;
8001
8002 /* Describe our frame. If the frame size is larger than an integer,
8003 print it as zero to avoid an assembler error. We won't be
8004 properly describing such a frame, but that's the best we can do. */
8005 if (TARGET_ABI_UNICOSMK)
8006 ;
8007 else if (TARGET_ABI_OPEN_VMS)
8008 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8009 HOST_WIDE_INT_PRINT_DEC "\n",
8010 vms_unwind_regno,
8011 frame_size >= (1UL << 31) ? 0 : frame_size,
8012 reg_offset);
8013 else if (!flag_inhibit_size_directive)
8014 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8015 (frame_pointer_needed
8016 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8017 frame_size >= max_frame_size ? 0 : frame_size,
8018 crtl->args.pretend_args_size);
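/* As a hypothetical example, a 96-byte frame addressed off $sp with
   no pretend args would come out as

        .frame $30,96,$26,0

   giving the debugger the frame register, frame size, return-address
   register and the size of any pretend arguments.  */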
8019
8020 /* Describe which registers were spilled. */
8021 if (TARGET_ABI_UNICOSMK)
8022 ;
8023 else if (TARGET_ABI_OPEN_VMS)
8024 {
8025 if (imask)
8026 /* ??? Does VMS care if mask contains ra? The old code didn't
8027 set it, so I don't here. */
8028 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8029 if (fmask)
8030 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8031 if (alpha_procedure_type == PT_REGISTER)
8032 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8033 }
8034 else if (!flag_inhibit_size_directive)
8035 {
8036 if (imask)
8037 {
8038 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8039 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8040
8041 for (i = 0; i < 32; ++i)
8042 if (imask & (1UL << i))
8043 reg_offset += 8;
8044 }
8045
8046 if (fmask)
8047 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8048 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8049 }
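/* A hypothetical illustration of the OSF directives above: saving
   $9 and $26 with the save area 16 bytes below the virtual frame
   pointer would emit

        .mask 0x4000200,-16

   i.e. one bit per saved integer register plus the offset of the
   save area relative to the CFA; .fmask does the same for the
   floating registers.  */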
8050
8051 #if TARGET_ABI_OPEN_VMS
8052 /* Ifdef'ed because link_section is only available then. */
8053 switch_to_section (readonly_data_section);
8054 fprintf (file, "\t.align 3\n");
8055 assemble_name (file, fnname); fputs ("..na:\n", file);
8056 fputs ("\t.ascii \"", file);
8057 assemble_name (file, fnname);
8058 fputs ("\\0\"\n", file);
8059 alpha_need_linkage (fnname, 1);
8060 switch_to_section (text_section);
8061 #endif
8062 }
8063
8064 /* Emit the .prologue note at the scheduled end of the prologue. */
8065
8066 static void
8067 alpha_output_function_end_prologue (FILE *file)
8068 {
8069 if (TARGET_ABI_UNICOSMK)
8070 ;
8071 else if (TARGET_ABI_OPEN_VMS)
8072 fputs ("\t.prologue\n", file);
8073 else if (TARGET_ABI_WINDOWS_NT)
8074 fputs ("\t.prologue 0\n", file);
8075 else if (!flag_inhibit_size_directive)
8076 fprintf (file, "\t.prologue %d\n",
8077 alpha_function_needs_gp || cfun->is_thunk);
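/* A clarifying note, not in the original: the operand printed above
   is 1 exactly when the function (or thunk) had to load its own gp,
   i.e. when a prologue ldgp sequence precedes this point, and 0
   otherwise.  */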
8078 }
8079
8080 /* Write function epilogue. */
8081
8082 void
8083 alpha_expand_epilogue (void)
8084 {
8085 /* Registers to save. */
8086 unsigned long imask = 0;
8087 unsigned long fmask = 0;
8088 /* Stack space needed for pushing registers clobbered by us. */
8089 HOST_WIDE_INT sa_size;
8090 /* Complete stack size needed. */
8091 HOST_WIDE_INT frame_size;
8092 /* Offset from base reg to register save area. */
8093 HOST_WIDE_INT reg_offset;
8094 int fp_is_frame_pointer, fp_offset;
8095 rtx sa_reg, sa_reg_exp = NULL;
8096 rtx sp_adj1, sp_adj2, mem, reg, insn;
8097 rtx eh_ofs;
8098 rtx cfa_restores = NULL_RTX;
8099 int i;
8100
8101 sa_size = alpha_sa_size ();
8102
8103 frame_size = get_frame_size ();
8104 if (TARGET_ABI_OPEN_VMS)
8105 frame_size = ALPHA_ROUND (sa_size
8106 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8107 + frame_size
8108 + crtl->args.pretend_args_size);
8109 else if (TARGET_ABI_UNICOSMK)
8110 frame_size = ALPHA_ROUND (sa_size
8111 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8112 + ALPHA_ROUND (frame_size
8113 + crtl->outgoing_args_size);
8114 else
8115 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8116 + sa_size
8117 + ALPHA_ROUND (frame_size
8118 + crtl->args.pretend_args_size));
8119
8120 if (TARGET_ABI_OPEN_VMS)
8121 {
8122 if (alpha_procedure_type == PT_STACK)
8123 reg_offset = 8;
8124 else
8125 reg_offset = 0;
8126 }
8127 else
8128 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8129
8130 alpha_sa_mask (&imask, &fmask);
8131
8132 fp_is_frame_pointer
8133 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8134 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8135 fp_offset = 0;
8136 sa_reg = stack_pointer_rtx;
8137
8138 if (crtl->calls_eh_return)
8139 eh_ofs = EH_RETURN_STACKADJ_RTX;
8140 else
8141 eh_ofs = NULL_RTX;
8142
8143 if (!TARGET_ABI_UNICOSMK && sa_size)
8144 {
8145 /* If we have a frame pointer, restore SP from it. */
8146 if ((TARGET_ABI_OPEN_VMS
8147 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8148 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8149 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8150
8151 /* Cope with very large offsets to the register save area. */
8152 if (reg_offset + sa_size > 0x8000)
8153 {
8154 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8155 HOST_WIDE_INT bias;
8156
8157 if (low + sa_size <= 0x8000)
8158 bias = reg_offset - low, reg_offset = low;
8159 else
8160 bias = reg_offset, reg_offset = 0;
8161
8162 sa_reg = gen_rtx_REG (DImode, 22);
8163 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8164
8165 emit_move_insn (sa_reg, sa_reg_exp);
8166 }
8167
8168 /* Restore registers in order, excepting a true frame pointer. */
8169
8170 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8171 if (! eh_ofs)
8172 set_mem_alias_set (mem, alpha_sr_alias_set);
8173 reg = gen_rtx_REG (DImode, REG_RA);
8174 emit_move_insn (reg, mem);
8175 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8176
8177 reg_offset += 8;
8178 imask &= ~(1UL << REG_RA);
8179
8180 for (i = 0; i < 31; ++i)
8181 if (imask & (1UL << i))
8182 {
8183 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8184 fp_offset = reg_offset;
8185 else
8186 {
8187 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8188 set_mem_alias_set (mem, alpha_sr_alias_set);
8189 reg = gen_rtx_REG (DImode, i);
8190 emit_move_insn (reg, mem);
8191 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8192 cfa_restores);
8193 }
8194 reg_offset += 8;
8195 }
8196
8197 for (i = 0; i < 31; ++i)
8198 if (fmask & (1UL << i))
8199 {
8200 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8201 set_mem_alias_set (mem, alpha_sr_alias_set);
8202 reg = gen_rtx_REG (DFmode, i+32);
8203 emit_move_insn (reg, mem);
8204 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8205 reg_offset += 8;
8206 }
8207 }
8208 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8209 {
8210 /* Restore callee-saved general-purpose registers. */
8211
8212 reg_offset = -56;
8213
8214 for (i = 9; i < 15; i++)
8215 if (imask & (1UL << i))
8216 {
8217 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8218 reg_offset));
8219 set_mem_alias_set (mem, alpha_sr_alias_set);
8220 reg = gen_rtx_REG (DImode, i);
8221 emit_move_insn (reg, mem);
8222 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8223 reg_offset -= 8;
8224 }
8225
8226 for (i = 2; i < 10; i++)
8227 if (fmask & (1UL << i))
8228 {
8229 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8230 reg_offset));
8231 set_mem_alias_set (mem, alpha_sr_alias_set);
8232 reg = gen_rtx_REG (DFmode, i+32);
8233 emit_move_insn (reg, mem);
8234 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8235 reg_offset -= 8;
8236 }
8237
8238 /* Restore the return address from the DSIB. */
8239 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8240 set_mem_alias_set (mem, alpha_sr_alias_set);
8241 reg = gen_rtx_REG (DImode, REG_RA);
8242 emit_move_insn (reg, mem);
8243 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8244 }
8245
8246 if (frame_size || eh_ofs)
8247 {
8248 sp_adj1 = stack_pointer_rtx;
8249
8250 if (eh_ofs)
8251 {
8252 sp_adj1 = gen_rtx_REG (DImode, 23);
8253 emit_move_insn (sp_adj1,
8254 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8255 }
8256
8257 /* If the stack size is large, begin computation into a temporary
8258 register so as not to interfere with a potential fp restore,
8259 which must be consecutive with an SP restore. */
8260 if (frame_size < 32768
8261 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8262 sp_adj2 = GEN_INT (frame_size);
8263 else if (TARGET_ABI_UNICOSMK)
8264 {
8265 sp_adj1 = gen_rtx_REG (DImode, 23);
8266 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8267 sp_adj2 = const0_rtx;
8268 }
8269 else if (frame_size < 0x40007fffL)
8270 {
8271 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8272
8273 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8274 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8275 sp_adj1 = sa_reg;
8276 else
8277 {
8278 sp_adj1 = gen_rtx_REG (DImode, 23);
8279 emit_move_insn (sp_adj1, sp_adj2);
8280 }
8281 sp_adj2 = GEN_INT (low);
8282 }
8283 else
8284 {
8285 rtx tmp = gen_rtx_REG (DImode, 23);
8286 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8287 if (!sp_adj2)
8288 {
8289 /* We can't drop new things to memory this late, afaik,
8290 so build it up by pieces. */
8291 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8292 -(frame_size < 0));
8293 gcc_assert (sp_adj2);
8294 }
8295 }
8296
8297 /* From now on, things must be in order. So emit blockages. */
8298
8299 /* Restore the frame pointer. */
8300 if (TARGET_ABI_UNICOSMK)
8301 {
8302 emit_insn (gen_blockage ());
8303 mem = gen_rtx_MEM (DImode,
8304 plus_constant (hard_frame_pointer_rtx, -16));
8305 set_mem_alias_set (mem, alpha_sr_alias_set);
8306 emit_move_insn (hard_frame_pointer_rtx, mem);
8307 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8308 hard_frame_pointer_rtx, cfa_restores);
8309 }
8310 else if (fp_is_frame_pointer)
8311 {
8312 emit_insn (gen_blockage ());
8313 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8314 set_mem_alias_set (mem, alpha_sr_alias_set);
8315 emit_move_insn (hard_frame_pointer_rtx, mem);
8316 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8317 hard_frame_pointer_rtx, cfa_restores);
8318 }
8319 else if (TARGET_ABI_OPEN_VMS)
8320 {
8321 emit_insn (gen_blockage ());
8322 emit_move_insn (hard_frame_pointer_rtx,
8323 gen_rtx_REG (DImode, vms_save_fp_regno));
8324 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8325 hard_frame_pointer_rtx, cfa_restores);
8326 }
8327
8328 /* Restore the stack pointer. */
8329 emit_insn (gen_blockage ());
8330 if (sp_adj2 == const0_rtx)
8331 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8332 else
8333 insn = emit_move_insn (stack_pointer_rtx,
8334 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8335 REG_NOTES (insn) = cfa_restores;
8336 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8337 RTX_FRAME_RELATED_P (insn) = 1;
8338 }
8339 else
8340 {
8341 gcc_assert (cfa_restores == NULL);
8342
8343 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8344 {
8345 emit_insn (gen_blockage ());
8346 insn = emit_move_insn (hard_frame_pointer_rtx,
8347 gen_rtx_REG (DImode, vms_save_fp_regno));
8348 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8349 RTX_FRAME_RELATED_P (insn) = 1;
8350 }
8351 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8352 {
8353 /* Decrement the frame pointer if the function does not have a
8354 frame. */
8355 emit_insn (gen_blockage ());
8356 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8357 hard_frame_pointer_rtx, constm1_rtx));
8358 }
8359 }
8360 }
8361 \f
8362 /* Output the rest of the textual info surrounding the epilogue. */
8363
8364 void
8365 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8366 {
8367 rtx insn;
8368
8369 /* We output a nop after noreturn calls at the very end of the function to
8370 ensure that the return address always remains in the caller's code range,
8371 as not doing so might confuse unwinding engines. */
8372 insn = get_last_insn ();
8373 if (!INSN_P (insn))
8374 insn = prev_active_insn (insn);
8375 if (CALL_P (insn))
8376 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8377
8378 #if TARGET_ABI_OSF
8379 if (cfun->is_thunk)
8380 free_after_compilation (cfun);
8381 #endif
8382
8383 #if TARGET_ABI_OPEN_VMS
8384 alpha_write_linkage (file, fnname, decl);
8385 #endif
8386
8387 /* End the function. */
8388 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8389 {
8390 fputs ("\t.end ", file);
8391 assemble_name (file, fnname);
8392 putc ('\n', file);
8393 }
8394 inside_function = FALSE;
8395
8396 /* Output jump tables and the static subroutine information block. */
8397 if (TARGET_ABI_UNICOSMK)
8398 {
8399 unicosmk_output_ssib (file, fnname);
8400 unicosmk_output_deferred_case_vectors (file);
8401 }
8402 }
8403
8404 #if TARGET_ABI_OSF
8405 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8406
8407 In order to avoid the hordes of differences between generated code
8408 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8409 lots of code loading up large constants, generate rtl and emit it
8410 instead of going straight to text.
8411
8412 Not sure why this idea hasn't been explored before... */
8413
8414 static void
8415 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8416 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8417 tree function)
8418 {
8419 HOST_WIDE_INT hi, lo;
8420 rtx this_rtx, insn, funexp;
8421
8422 gcc_assert (cfun->is_thunk);
8423
8424 /* We always require a valid GP. */
8425 emit_insn (gen_prologue_ldgp ());
8426 emit_note (NOTE_INSN_PROLOGUE_END);
8427
8428 /* Find the "this" pointer. If the function returns a structure,
8429 the structure return pointer is in $16. */
8430 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8431 this_rtx = gen_rtx_REG (Pmode, 17);
8432 else
8433 this_rtx = gen_rtx_REG (Pmode, 16);
8434
8435 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8436 entire constant for the add. */
8437 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8438 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
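/* Added note: hi + lo == delta exactly when delta survives the
   sign-extending 32-bit split; e.g. (made-up value) delta = 0x18000
   gives lo = -0x8000 and hi = 0x20000, handled by the two add insns
   below.  Deltas outside that range fall through to the
   constant-building path in the else branch.  */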
8439 if (hi + lo == delta)
8440 {
8441 if (hi)
8442 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8443 if (lo)
8444 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8445 }
8446 else
8447 {
8448 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8449 delta, -(delta < 0));
8450 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8451 }
8452
8453 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8454 if (vcall_offset)
8455 {
8456 rtx tmp, tmp2;
8457
8458 tmp = gen_rtx_REG (Pmode, 0);
8459 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8460
8461 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8462 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8463 if (hi + lo == vcall_offset)
8464 {
8465 if (hi)
8466 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8467 }
8468 else
8469 {
8470 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8471 vcall_offset, -(vcall_offset < 0));
8472 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8473 lo = 0;
8474 }
8475 if (lo)
8476 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8477 else
8478 tmp2 = tmp;
8479 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8480
8481 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8482 }
8483
8484 /* Generate a tail call to the target function. */
8485 if (! TREE_USED (function))
8486 {
8487 assemble_external (function);
8488 TREE_USED (function) = 1;
8489 }
8490 funexp = XEXP (DECL_RTL (function), 0);
8491 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8492 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8493 SIBLING_CALL_P (insn) = 1;
8494
8495 /* Run just enough of rest_of_compilation to get the insns emitted.
8496 There's not really enough bulk here to make other passes such as
8497 instruction scheduling worth while. Note that use_thunk calls
8498 assemble_start_function and assemble_end_function. */
8499 insn = get_insns ();
8500 insn_locators_alloc ();
8501 shorten_branches (insn);
8502 final_start_function (insn, file, 1);
8503 final (insn, file, 1);
8504 final_end_function ();
8505 }
8506 #endif /* TARGET_ABI_OSF */
8507 \f
8508 /* Debugging support. */
8509
8510 #include "gstab.h"
8511
8512 /* Count the number of sdb-related labels that are generated (to find block
8513 start and end boundaries). */
8514
8515 int sdb_label_count = 0;
8516
8517 /* Name of the file containing the current function. */
8518
8519 static const char *current_function_file = "";
8520
8521 /* Offsets to alpha virtual arg/local debugging pointers. */
8522
8523 long alpha_arg_offset;
8524 long alpha_auto_offset;
8525 \f
8526 /* Emit a new filename to a stream. */
8527
8528 void
8529 alpha_output_filename (FILE *stream, const char *name)
8530 {
8531 static int first_time = TRUE;
8532
8533 if (first_time)
8534 {
8535 first_time = FALSE;
8536 ++num_source_filenames;
8537 current_function_file = name;
8538 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8539 output_quoted_string (stream, name);
8540 fprintf (stream, "\n");
8541 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8542 fprintf (stream, "\t#@stabs\n");
8543 }
8544
8545 else if (write_symbols == DBX_DEBUG)
8546 /* dbxout.c will emit an appropriate .stabs directive. */
8547 return;
8548
8549 else if (name != current_function_file
8550 && strcmp (name, current_function_file) != 0)
8551 {
8552 if (inside_function && ! TARGET_GAS)
8553 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8554 else
8555 {
8556 ++num_source_filenames;
8557 current_function_file = name;
8558 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8559 }
8560
8561 output_quoted_string (stream, name);
8562 fprintf (stream, "\n");
8563 }
8564 }
8565 \f
8566 /* Structure to show the current status of registers and memory. */
8567
8568 struct shadow_summary
8569 {
8570 struct {
8571 unsigned int i : 31; /* Mask of int regs */
8572 unsigned int fp : 31; /* Mask of fp regs */
8573 unsigned int mem : 1; /* mem == imem | fpmem */
8574 } used, defd;
8575 };
8576
8577 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8578 to the summary structure. SET is nonzero if the insn is setting the
8579 object, otherwise zero. */
8580
8581 static void
8582 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8583 {
8584 const char *format_ptr;
8585 int i, j;
8586
8587 if (x == 0)
8588 return;
8589
8590 switch (GET_CODE (x))
8591 {
8592 /* ??? Note that this case would be incorrect if the Alpha had a
8593 ZERO_EXTRACT in SET_DEST. */
8594 case SET:
8595 summarize_insn (SET_SRC (x), sum, 0);
8596 summarize_insn (SET_DEST (x), sum, 1);
8597 break;
8598
8599 case CLOBBER:
8600 summarize_insn (XEXP (x, 0), sum, 1);
8601 break;
8602
8603 case USE:
8604 summarize_insn (XEXP (x, 0), sum, 0);
8605 break;
8606
8607 case ASM_OPERANDS:
8608 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8609 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8610 break;
8611
8612 case PARALLEL:
8613 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8614 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8615 break;
8616
8617 case SUBREG:
8618 summarize_insn (SUBREG_REG (x), sum, 0);
8619 break;
8620
8621 case REG:
8622 {
8623 int regno = REGNO (x);
8624 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8625
8626 if (regno == 31 || regno == 63)
8627 break;
8628
8629 if (set)
8630 {
8631 if (regno < 32)
8632 sum->defd.i |= mask;
8633 else
8634 sum->defd.fp |= mask;
8635 }
8636 else
8637 {
8638 if (regno < 32)
8639 sum->used.i |= mask;
8640 else
8641 sum->used.fp |= mask;
8642 }
8643 }
8644 break;
8645
8646 case MEM:
8647 if (set)
8648 sum->defd.mem = 1;
8649 else
8650 sum->used.mem = 1;
8651
8652 /* Find the regs used in memory address computation: */
8653 summarize_insn (XEXP (x, 0), sum, 0);
8654 break;
8655
8656 case CONST_INT: case CONST_DOUBLE:
8657 case SYMBOL_REF: case LABEL_REF: case CONST:
8658 case SCRATCH: case ASM_INPUT:
8659 break;
8660
8661 /* Handle common unary and binary ops for efficiency. */
8662 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8663 case MOD: case UDIV: case UMOD: case AND: case IOR:
8664 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8665 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8666 case NE: case EQ: case GE: case GT: case LE:
8667 case LT: case GEU: case GTU: case LEU: case LTU:
8668 summarize_insn (XEXP (x, 0), sum, 0);
8669 summarize_insn (XEXP (x, 1), sum, 0);
8670 break;
8671
8672 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8673 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8674 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8675 case SQRT: case FFS:
8676 summarize_insn (XEXP (x, 0), sum, 0);
8677 break;
8678
8679 default:
8680 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8681 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8682 switch (format_ptr[i])
8683 {
8684 case 'e':
8685 summarize_insn (XEXP (x, i), sum, 0);
8686 break;
8687
8688 case 'E':
8689 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8690 summarize_insn (XVECEXP (x, i, j), sum, 0);
8691 break;
8692
8693 case 'i':
8694 break;
8695
8696 default:
8697 gcc_unreachable ();
8698 }
8699 }
8700 }
8701
8702 /* Ensure a sufficient number of `trapb' insns are in the code when
8703 the user requests code with a trap precision of functions or
8704 instructions.
8705
8706 In naive mode, when the user requests a trap-precision of
8707 "instruction", a trapb is needed after every instruction that may
8708 generate a trap. This ensures that the code is resumption safe but
8709 it is also slow.
8710
8711 When optimizations are turned on, we delay issuing a trapb as long
8712 as possible. In this context, a trap shadow is the sequence of
8713 instructions that starts with a (potentially) trap generating
8714 instruction and extends to the next trapb or call_pal instruction
8715 (but GCC never generates call_pal by itself). We can delay (and
8716 therefore sometimes omit) a trapb subject to the following
8717 conditions:
8718
8719 (a) On entry to the trap shadow, if any Alpha register or memory
8720 location contains a value that is used as an operand value by some
8721 instruction in the trap shadow (live on entry), then no instruction
8722 in the trap shadow may modify the register or memory location.
8723
8724 (b) Within the trap shadow, the computation of the base register
8725 for a memory load or store instruction may not involve using the
8726 result of an instruction that might generate an UNPREDICTABLE
8727 result.
8728
8729 (c) Within the trap shadow, no register may be used more than once
8730 as a destination register. (This is to make life easier for the
8731 trap-handler.)
8732
8733 (d) The trap shadow may not include any branch instructions. */
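/* An illustrative (not compiler-generated) shadow:

        addt $f1,$f2,$f10       # may trap; shadow opens
        mult $f10,$f3,$f11      # ok: stays within the shadow rules
        addt $f4,$f5,$f10       # reuses $f10 as a destination,
                                # violating (c)

   alpha_handle_trap_shadows below would close the shadow by emitting
   a trapb before the third instruction.  */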
8734
8735 static void
8736 alpha_handle_trap_shadows (void)
8737 {
8738 struct shadow_summary shadow;
8739 int trap_pending, exception_nesting;
8740 rtx i, n;
8741
8742 trap_pending = 0;
8743 exception_nesting = 0;
8744 shadow.used.i = 0;
8745 shadow.used.fp = 0;
8746 shadow.used.mem = 0;
8747 shadow.defd = shadow.used;
8748
8749 for (i = get_insns (); i ; i = NEXT_INSN (i))
8750 {
8751 if (NOTE_P (i))
8752 {
8753 switch (NOTE_KIND (i))
8754 {
8755 case NOTE_INSN_EH_REGION_BEG:
8756 exception_nesting++;
8757 if (trap_pending)
8758 goto close_shadow;
8759 break;
8760
8761 case NOTE_INSN_EH_REGION_END:
8762 exception_nesting--;
8763 if (trap_pending)
8764 goto close_shadow;
8765 break;
8766
8767 case NOTE_INSN_EPILOGUE_BEG:
8768 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8769 goto close_shadow;
8770 break;
8771 }
8772 }
8773 else if (trap_pending)
8774 {
8775 if (alpha_tp == ALPHA_TP_FUNC)
8776 {
8777 if (JUMP_P (i)
8778 && GET_CODE (PATTERN (i)) == RETURN)
8779 goto close_shadow;
8780 }
8781 else if (alpha_tp == ALPHA_TP_INSN)
8782 {
8783 if (optimize > 0)
8784 {
8785 struct shadow_summary sum;
8786
8787 sum.used.i = 0;
8788 sum.used.fp = 0;
8789 sum.used.mem = 0;
8790 sum.defd = sum.used;
8791
8792 switch (GET_CODE (i))
8793 {
8794 case INSN:
8795 /* Annoyingly, get_attr_trap will die on these. */
8796 if (GET_CODE (PATTERN (i)) == USE
8797 || GET_CODE (PATTERN (i)) == CLOBBER)
8798 break;
8799
8800 summarize_insn (PATTERN (i), &sum, 0);
8801
8802 if ((sum.defd.i & shadow.defd.i)
8803 || (sum.defd.fp & shadow.defd.fp))
8804 {
8805 /* (c) would be violated */
8806 goto close_shadow;
8807 }
8808
8809 /* Combine shadow with summary of current insn: */
8810 shadow.used.i |= sum.used.i;
8811 shadow.used.fp |= sum.used.fp;
8812 shadow.used.mem |= sum.used.mem;
8813 shadow.defd.i |= sum.defd.i;
8814 shadow.defd.fp |= sum.defd.fp;
8815 shadow.defd.mem |= sum.defd.mem;
8816
8817 if ((sum.defd.i & shadow.used.i)
8818 || (sum.defd.fp & shadow.used.fp)
8819 || (sum.defd.mem & shadow.used.mem))
8820 {
8821 /* (a) would be violated (also takes care of (b)) */
8822 gcc_assert (get_attr_trap (i) != TRAP_YES
8823 || (!(sum.defd.i & sum.used.i)
8824 && !(sum.defd.fp & sum.used.fp)));
8825
8826 goto close_shadow;
8827 }
8828 break;
8829
8830 case JUMP_INSN:
8831 case CALL_INSN:
8832 case CODE_LABEL:
8833 goto close_shadow;
8834
8835 default:
8836 gcc_unreachable ();
8837 }
8838 }
8839 else
8840 {
8841 close_shadow:
8842 n = emit_insn_before (gen_trapb (), i);
8843 PUT_MODE (n, TImode);
8844 PUT_MODE (i, TImode);
8845 trap_pending = 0;
8846 shadow.used.i = 0;
8847 shadow.used.fp = 0;
8848 shadow.used.mem = 0;
8849 shadow.defd = shadow.used;
8850 }
8851 }
8852 }
8853
8854 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8855 && NONJUMP_INSN_P (i)
8856 && GET_CODE (PATTERN (i)) != USE
8857 && GET_CODE (PATTERN (i)) != CLOBBER
8858 && get_attr_trap (i) == TRAP_YES)
8859 {
8860 if (optimize && !trap_pending)
8861 summarize_insn (PATTERN (i), &shadow, 0);
8862 trap_pending = 1;
8863 }
8864 }
8865 }
8866 \f
8867 /* Alpha can only issue instruction groups simultaneously if they are
8868 suitably aligned. This is very processor-specific. */
8869 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8870 that are marked "fake". These instructions do not exist on that target,
8871 but it is possible to see these insns with deranged combinations of
8872 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8873 choose a result at random. */
8874
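/* Rough orientation, simplified: EV4 issues from aligned 8-byte pairs
   and EV5 from aligned 16-byte blocks, which is why alpha_reorg below
   calls alpha_align_insns with a max_align of 8 or 16 respectively
   and pads short groups with nops chosen so that they occupy slots
   the classifications below show to be free.  */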
8875 enum alphaev4_pipe {
8876 EV4_STOP = 0,
8877 EV4_IB0 = 1,
8878 EV4_IB1 = 2,
8879 EV4_IBX = 4
8880 };
8881
8882 enum alphaev5_pipe {
8883 EV5_STOP = 0,
8884 EV5_NONE = 1,
8885 EV5_E01 = 2,
8886 EV5_E0 = 4,
8887 EV5_E1 = 8,
8888 EV5_FAM = 16,
8889 EV5_FA = 32,
8890 EV5_FM = 64
8891 };
8892
8893 static enum alphaev4_pipe
8894 alphaev4_insn_pipe (rtx insn)
8895 {
8896 if (recog_memoized (insn) < 0)
8897 return EV4_STOP;
8898 if (get_attr_length (insn) != 4)
8899 return EV4_STOP;
8900
8901 switch (get_attr_type (insn))
8902 {
8903 case TYPE_ILD:
8904 case TYPE_LDSYM:
8905 case TYPE_FLD:
8906 case TYPE_LD_L:
8907 return EV4_IBX;
8908
8909 case TYPE_IADD:
8910 case TYPE_ILOG:
8911 case TYPE_ICMOV:
8912 case TYPE_ICMP:
8913 case TYPE_FST:
8914 case TYPE_SHIFT:
8915 case TYPE_IMUL:
8916 case TYPE_FBR:
8917 case TYPE_MVI: /* fake */
8918 return EV4_IB0;
8919
8920 case TYPE_IST:
8921 case TYPE_MISC:
8922 case TYPE_IBR:
8923 case TYPE_JSR:
8924 case TYPE_CALLPAL:
8925 case TYPE_FCPYS:
8926 case TYPE_FCMOV:
8927 case TYPE_FADD:
8928 case TYPE_FDIV:
8929 case TYPE_FMUL:
8930 case TYPE_ST_C:
8931 case TYPE_MB:
8932 case TYPE_FSQRT: /* fake */
8933 case TYPE_FTOI: /* fake */
8934 case TYPE_ITOF: /* fake */
8935 return EV4_IB1;
8936
8937 default:
8938 gcc_unreachable ();
8939 }
8940 }
8941
8942 static enum alphaev5_pipe
8943 alphaev5_insn_pipe (rtx insn)
8944 {
8945 if (recog_memoized (insn) < 0)
8946 return EV5_STOP;
8947 if (get_attr_length (insn) != 4)
8948 return EV5_STOP;
8949
8950 switch (get_attr_type (insn))
8951 {
8952 case TYPE_ILD:
8953 case TYPE_FLD:
8954 case TYPE_LDSYM:
8955 case TYPE_IADD:
8956 case TYPE_ILOG:
8957 case TYPE_ICMOV:
8958 case TYPE_ICMP:
8959 return EV5_E01;
8960
8961 case TYPE_IST:
8962 case TYPE_FST:
8963 case TYPE_SHIFT:
8964 case TYPE_IMUL:
8965 case TYPE_MISC:
8966 case TYPE_MVI:
8967 case TYPE_LD_L:
8968 case TYPE_ST_C:
8969 case TYPE_MB:
8970 case TYPE_FTOI: /* fake */
8971 case TYPE_ITOF: /* fake */
8972 return EV5_E0;
8973
8974 case TYPE_IBR:
8975 case TYPE_JSR:
8976 case TYPE_CALLPAL:
8977 return EV5_E1;
8978
8979 case TYPE_FCPYS:
8980 return EV5_FAM;
8981
8982 case TYPE_FBR:
8983 case TYPE_FCMOV:
8984 case TYPE_FADD:
8985 case TYPE_FDIV:
8986 case TYPE_FSQRT: /* fake */
8987 return EV5_FA;
8988
8989 case TYPE_FMUL:
8990 return EV5_FM;
8991
8992 default:
8993 gcc_unreachable ();
8994 }
8995 }
8996
8997 /* IN_USE is a mask of the slots currently filled within the insn group.
8998 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8999 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9000
9001 LEN is, of course, the length of the group in bytes. */
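/* Illustration of the IBX convention (added comment): a load
   classified EV4_IBX first claims IB0 and records IBX; if a later
   IB0-only insn joins the same group, the load is assumed to have
   been swapped into IB1 by the hardware, so the group can still
   dual-issue.  */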
9002
9003 static rtx
9004 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9005 {
9006 int len, in_use;
9007
9008 len = in_use = 0;
9009
9010 if (! INSN_P (insn)
9011 || GET_CODE (PATTERN (insn)) == CLOBBER
9012 || GET_CODE (PATTERN (insn)) == USE)
9013 goto next_and_done;
9014
9015 while (1)
9016 {
9017 enum alphaev4_pipe pipe;
9018
9019 pipe = alphaev4_insn_pipe (insn);
9020 switch (pipe)
9021 {
9022 case EV4_STOP:
9023 /* Force complex instructions to start new groups. */
9024 if (in_use)
9025 goto done;
9026
9027 /* If this is a completely unrecognized insn, it's an asm.
9028 We don't know how long it is, so record length as -1 to
9029 signal a needed realignment. */
9030 if (recog_memoized (insn) < 0)
9031 len = -1;
9032 else
9033 len = get_attr_length (insn);
9034 goto next_and_done;
9035
9036 case EV4_IBX:
9037 if (in_use & EV4_IB0)
9038 {
9039 if (in_use & EV4_IB1)
9040 goto done;
9041 in_use |= EV4_IB1;
9042 }
9043 else
9044 in_use |= EV4_IB0 | EV4_IBX;
9045 break;
9046
9047 case EV4_IB0:
9048 if (in_use & EV4_IB0)
9049 {
9050 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9051 goto done;
9052 in_use |= EV4_IB1;
9053 }
9054 in_use |= EV4_IB0;
9055 break;
9056
9057 case EV4_IB1:
9058 if (in_use & EV4_IB1)
9059 goto done;
9060 in_use |= EV4_IB1;
9061 break;
9062
9063 default:
9064 gcc_unreachable ();
9065 }
9066 len += 4;
9067
9068 /* Haifa doesn't do well scheduling branches. */
9069 if (JUMP_P (insn))
9070 goto next_and_done;
9071
9072 next:
9073 insn = next_nonnote_insn (insn);
9074
9075 if (!insn || ! INSN_P (insn))
9076 goto done;
9077
9078 /* Let Haifa tell us where it thinks insn group boundaries are. */
9079 if (GET_MODE (insn) == TImode)
9080 goto done;
9081
9082 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9083 goto next;
9084 }
9085
9086 next_and_done:
9087 insn = next_nonnote_insn (insn);
9088
9089 done:
9090 *plen = len;
9091 *pin_use = in_use;
9092 return insn;
9093 }
9094
9095 /* IN_USE is a mask of the slots currently filled within the insn group.
9096 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9097 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9098
9099 LEN is, of course, the length of the group in bytes. */
9100
9101 static rtx
9102 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9103 {
9104 int len, in_use;
9105
9106 len = in_use = 0;
9107
9108 if (! INSN_P (insn)
9109 || GET_CODE (PATTERN (insn)) == CLOBBER
9110 || GET_CODE (PATTERN (insn)) == USE)
9111 goto next_and_done;
9112
9113 while (1)
9114 {
9115 enum alphaev5_pipe pipe;
9116
9117 pipe = alphaev5_insn_pipe (insn);
9118 switch (pipe)
9119 {
9120 case EV5_STOP:
9121 /* Force complex instructions to start new groups. */
9122 if (in_use)
9123 goto done;
9124
9125 /* If this is a completely unrecognized insn, it's an asm.
9126 We don't know how long it is, so record length as -1 to
9127 signal a needed realignment. */
9128 if (recog_memoized (insn) < 0)
9129 len = -1;
9130 else
9131 len = get_attr_length (insn);
9132 goto next_and_done;
9133
9134 /* ??? In most of the cases below we would like to assert that
9135 they can never happen, as that would indicate an error either
9136 in Haifa or in the scheduling description. Unfortunately, Haifa
9137 never schedules the last instruction of the BB, so we don't
9138 have an accurate TI bit to go off of. */
9139 case EV5_E01:
9140 if (in_use & EV5_E0)
9141 {
9142 if (in_use & EV5_E1)
9143 goto done;
9144 in_use |= EV5_E1;
9145 }
9146 else
9147 in_use |= EV5_E0 | EV5_E01;
9148 break;
9149
9150 case EV5_E0:
9151 if (in_use & EV5_E0)
9152 {
9153 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9154 goto done;
9155 in_use |= EV5_E1;
9156 }
9157 in_use |= EV5_E0;
9158 break;
9159
9160 case EV5_E1:
9161 if (in_use & EV5_E1)
9162 goto done;
9163 in_use |= EV5_E1;
9164 break;
9165
9166 case EV5_FAM:
9167 if (in_use & EV5_FA)
9168 {
9169 if (in_use & EV5_FM)
9170 goto done;
9171 in_use |= EV5_FM;
9172 }
9173 else
9174 in_use |= EV5_FA | EV5_FAM;
9175 break;
9176
9177 case EV5_FA:
9178 if (in_use & EV5_FA)
9179 goto done;
9180 in_use |= EV5_FA;
9181 break;
9182
9183 case EV5_FM:
9184 if (in_use & EV5_FM)
9185 goto done;
9186 in_use |= EV5_FM;
9187 break;
9188
9189 case EV5_NONE:
9190 break;
9191
9192 default:
9193 gcc_unreachable ();
9194 }
9195 len += 4;
9196
9197 /* Haifa doesn't do well scheduling branches. */
9198 /* ??? If this is predicted not-taken, slotting continues, except
9199 that no more IBR, FBR, or JSR insns may be slotted. */
9200 if (JUMP_P (insn))
9201 goto next_and_done;
9202
9203 next:
9204 insn = next_nonnote_insn (insn);
9205
9206 if (!insn || ! INSN_P (insn))
9207 goto done;
9208
9209 /* Let Haifa tell us where it thinks insn group boundaries are. */
9210 if (GET_MODE (insn) == TImode)
9211 goto done;
9212
9213 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9214 goto next;
9215 }
9216
9217 next_and_done:
9218 insn = next_nonnote_insn (insn);
9219
9220 done:
9221 *plen = len;
9222 *pin_use = in_use;
9223 return insn;
9224 }
9225
9226 static rtx
9227 alphaev4_next_nop (int *pin_use)
9228 {
9229 int in_use = *pin_use;
9230 rtx nop;
9231
9232 if (!(in_use & EV4_IB0))
9233 {
9234 in_use |= EV4_IB0;
9235 nop = gen_nop ();
9236 }
9237 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9238 {
9239 in_use |= EV4_IB1;
9240 nop = gen_nop ();
9241 }
9242 else if (TARGET_FP && !(in_use & EV4_IB1))
9243 {
9244 in_use |= EV4_IB1;
9245 nop = gen_fnop ();
9246 }
9247 else
9248 nop = gen_unop ();
9249
9250 *pin_use = in_use;
9251 return nop;
9252 }
9253
9254 static rtx
9255 alphaev5_next_nop (int *pin_use)
9256 {
9257 int in_use = *pin_use;
9258 rtx nop;
9259
9260 if (!(in_use & EV5_E1))
9261 {
9262 in_use |= EV5_E1;
9263 nop = gen_nop ();
9264 }
9265 else if (TARGET_FP && !(in_use & EV5_FA))
9266 {
9267 in_use |= EV5_FA;
9268 nop = gen_fnop ();
9269 }
9270 else if (TARGET_FP && !(in_use & EV5_FM))
9271 {
9272 in_use |= EV5_FM;
9273 nop = gen_fnop ();
9274 }
9275 else
9276 nop = gen_unop ();
9277
9278 *pin_use = in_use;
9279 return nop;
9280 }
9281
9282 /* The instruction group alignment main loop. */
9283
9284 static void
9285 alpha_align_insns (unsigned int max_align,
9286 rtx (*next_group) (rtx, int *, int *),
9287 rtx (*next_nop) (int *))
9288 {
9289 /* ALIGN is the known alignment for the insn group. */
9290 unsigned int align;
9291 /* OFS is the offset of the current insn in the insn group. */
9292 int ofs;
9293 int prev_in_use, in_use, len, ldgp;
9294 rtx i, next;
9295
9296 /* Let shorten branches care for assigning alignments to code labels. */
9297 shorten_branches (get_insns ());
9298
9299 if (align_functions < 4)
9300 align = 4;
9301 else if ((unsigned int) align_functions < max_align)
9302 align = align_functions;
9303 else
9304 align = max_align;
9305
9306 ofs = prev_in_use = 0;
9307 i = get_insns ();
9308 if (NOTE_P (i))
9309 i = next_nonnote_insn (i);
9310
9311 ldgp = alpha_function_needs_gp ? 8 : 0;
9312
9313 while (i)
9314 {
9315 next = (*next_group) (i, &in_use, &len);
9316
9317 /* When we see a label, resync alignment etc. */
9318 if (LABEL_P (i))
9319 {
9320 unsigned int new_align = 1 << label_to_alignment (i);
9321
9322 if (new_align >= align)
9323 {
9324 align = new_align < max_align ? new_align : max_align;
9325 ofs = 0;
9326 }
9327
9328 else if (ofs & (new_align-1))
9329 ofs = (ofs | (new_align-1)) + 1;
9330 gcc_assert (!len);
9331 }
9332
9333 /* Handle complex instructions specially. */
9334 else if (in_use == 0)
9335 {
9336 /* Asms will have length < 0. This is a signal that we have
9337 lost alignment knowledge. Assume, however, that the asm
9338 will not mis-align instructions. */
9339 if (len < 0)
9340 {
9341 ofs = 0;
9342 align = 4;
9343 len = 0;
9344 }
9345 }
9346
9347 /* If the known alignment is smaller than the recognized insn group,
9348 realign the output. */
9349 else if ((int) align < len)
9350 {
9351 unsigned int new_log_align = len > 8 ? 4 : 3;
9352 rtx prev, where;
9353
9354 where = prev = prev_nonnote_insn (i);
9355 if (!where || !LABEL_P (where))
9356 where = i;
9357
9358 /* Can't realign between a call and its gp reload. */
9359 if (! (TARGET_EXPLICIT_RELOCS
9360 && prev && CALL_P (prev)))
9361 {
9362 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9363 align = 1 << new_log_align;
9364 ofs = 0;
9365 }
9366 }
9367
9368 /* We may not insert padding inside the initial ldgp sequence. */
9369 else if (ldgp > 0)
9370 ldgp -= len;
9371
9372 /* If the group won't fit in the same INT16 as the previous,
9373 we need to add padding to keep the group together. Rather
9374 than simply leaving the insn filling to the assembler, we
9375 can make use of the knowledge of what sorts of instructions
9376 were issued in the previous group to make sure that all of
9377 the added nops are really free. */
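/* Worked example (illustrative): with align 16, ofs 12 and an 8-byte
   group next, the group would straddle the boundary, so
   (align - ofs) / 4 = 1 nop is emitted, ofs resets to 0, and the
   8-byte group then starts at the new boundary.  */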
9378 else if (ofs + len > (int) align)
9379 {
9380 int nop_count = (align - ofs) / 4;
9381 rtx where;
9382
9383 /* Insert nops before labels, branches, and calls to truly merge
9384 the execution of the nops with the previous instruction group. */
9385 where = prev_nonnote_insn (i);
9386 if (where)
9387 {
9388 if (LABEL_P (where))
9389 {
9390 rtx where2 = prev_nonnote_insn (where);
9391 if (where2 && JUMP_P (where2))
9392 where = where2;
9393 }
9394 else if (NONJUMP_INSN_P (where))
9395 where = i;
9396 }
9397 else
9398 where = i;
9399
9400 do
9401 emit_insn_before ((*next_nop)(&prev_in_use), where);
9402 while (--nop_count);
9403 ofs = 0;
9404 }
9405
9406 ofs = (ofs + len) & (align - 1);
9407 prev_in_use = in_use;
9408 i = next;
9409 }
9410 }
9411
9412 /* Insert an unop between a noreturn function call and GP load. */
9413
9414 static void
9415 alpha_pad_noreturn (void)
9416 {
9417 rtx insn, next;
9418
9419 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9420 {
9421 if (!CALL_P (insn)
9422 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9423 continue;
9424
9425 next = next_active_insn (insn);
9426
9427 if (next)
9428 {
9429 rtx pat = PATTERN (next);
9430
9431 if (GET_CODE (pat) == SET
9432 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9433 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9434 emit_insn_after (gen_unop (), insn);
9435 }
9436 }
9437 }
9438 \f
9439 /* Machine dependent reorg pass. */
9440
9441 static void
9442 alpha_reorg (void)
9443 {
9444 /* Workaround for a linker error that triggers when an
9445 exception handler immediately follows a noreturn function.
9446
9447 The instruction stream from an object file:
9448
9449 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9450 58: 00 00 ba 27 ldah gp,0(ra)
9451 5c: 00 00 bd 23 lda gp,0(gp)
9452 60: 00 00 7d a7 ldq t12,0(gp)
9453 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9454
9455 was converted in the final link pass to:
9456
9457 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9458 fdb28: 00 00 fe 2f unop
9459 fdb2c: 00 00 fe 2f unop
9460 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9461 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9462
9463 GP load instructions were wrongly cleared by the linker relaxation
9464 pass. This workaround prevents removal of GP loads by inserting
9465 an unop instruction between a noreturn function call and
9466 exception handler prologue. */
9467
9468 if (current_function_has_exception_handlers ())
9469 alpha_pad_noreturn ();
9470
9471 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9472 alpha_handle_trap_shadows ();
9473
9474 /* Due to the number of extra trapb insns, don't bother fixing up
9475 alignment when trap precision is instruction. Moreover, we can
9476 only do our job when sched2 is run. */
9477 if (optimize && !optimize_size
9478 && alpha_tp != ALPHA_TP_INSN
9479 && flag_schedule_insns_after_reload)
9480 {
9481 if (alpha_tune == PROCESSOR_EV4)
9482 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9483 else if (alpha_tune == PROCESSOR_EV5)
9484 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9485 }
9486 }
9487 \f
9488 #if !TARGET_ABI_UNICOSMK
9489
9490 #ifdef HAVE_STAMP_H
9491 #include <stamp.h>
9492 #endif
9493
9494 static void
9495 alpha_file_start (void)
9496 {
9497 #ifdef OBJECT_FORMAT_ELF
9498 /* If emitting dwarf2 debug information, we cannot generate a .file
9499 directive to start the file, as it will conflict with dwarf2out
9500 file numbers. So it's only useful when emitting mdebug output. */
9501 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9502 #endif
9503
9504 default_file_start ();
9505 #ifdef MS_STAMP
9506 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9507 #endif
9508
9509 fputs ("\t.set noreorder\n", asm_out_file);
9510 fputs ("\t.set volatile\n", asm_out_file);
9511 if (!TARGET_ABI_OPEN_VMS)
9512 fputs ("\t.set noat\n", asm_out_file);
9513 if (TARGET_EXPLICIT_RELOCS)
9514 fputs ("\t.set nomacro\n", asm_out_file);
9515 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9516 {
9517 const char *arch;
9518
9519 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9520 arch = "ev6";
9521 else if (TARGET_MAX)
9522 arch = "pca56";
9523 else if (TARGET_BWX)
9524 arch = "ev56";
9525 else if (alpha_cpu == PROCESSOR_EV5)
9526 arch = "ev5";
9527 else
9528 arch = "ev4";
9529
9530 fprintf (asm_out_file, "\t.arch %s\n", arch);
9531 }
9532 }
9533 #endif
9534
9535 #ifdef OBJECT_FORMAT_ELF
9536 /* Since we don't have a .dynbss section, we should not allow global
9537 relocations in the .rodata section. */
9538
9539 static int
9540 alpha_elf_reloc_rw_mask (void)
9541 {
9542 return flag_pic ? 3 : 2;
9543 }
9544
9545 /* Return a section for X. The only special thing we do here is to
9546 honor small data. */
9547
9548 static section *
9549 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9550 unsigned HOST_WIDE_INT align)
9551 {
9552 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9553 /* ??? Consider using mergeable sdata sections. */
9554 return sdata_section;
9555 else
9556 return default_elf_select_rtx_section (mode, x, align);
9557 }
9558
9559 static unsigned int
9560 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9561 {
9562 unsigned int flags = 0;
9563
9564 if (strcmp (name, ".sdata") == 0
9565 || strncmp (name, ".sdata.", 7) == 0
9566 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9567 || strcmp (name, ".sbss") == 0
9568 || strncmp (name, ".sbss.", 6) == 0
9569 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9570 flags = SECTION_SMALL;
9571
9572 flags |= default_section_type_flags (decl, name, reloc);
9573 return flags;
9574 }
9575 #endif /* OBJECT_FORMAT_ELF */
9576 \f
9577 /* Structure to collect function names for final output in link section. */
9578 /* Note that items marked with GTY can't be ifdef'ed out. */
9579
9580 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9581 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9582
9583 struct GTY(()) alpha_links
9584 {
9585 int num;
9586 rtx linkage;
9587 enum links_kind lkind;
9588 enum reloc_kind rkind;
9589 };
9590
9591 struct GTY(()) alpha_funcs
9592 {
9593 int num;
9594 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9595 links;
9596 };
9597
9598 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9599 splay_tree alpha_links_tree;
9600 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9601 splay_tree alpha_funcs_tree;
9602
9603 static GTY(()) int alpha_funcs_num;
9604
9605 #if TARGET_ABI_OPEN_VMS
9606
9607 /* Return the VMS argument type corresponding to MODE. */
9608
9609 enum avms_arg_type
9610 alpha_arg_type (enum machine_mode mode)
9611 {
9612 switch (mode)
9613 {
9614 case SFmode:
9615 return TARGET_FLOAT_VAX ? FF : FS;
9616 case DFmode:
9617 return TARGET_FLOAT_VAX ? FD : FT;
9618 default:
9619 return I64;
9620 }
9621 }
9622
9623 /* Return an rtx for an integer representing the VMS Argument Information
9624 register value. */
9625
9626 rtx
9627 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9628 {
9629 unsigned HOST_WIDE_INT regval = cum.num_args;
9630 int i;
9631
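/* Layout note with a made-up example: the argument count occupies the
   low bits and each of the (up to six) register arguments contributes
   a 3-bit type code starting at bit 8, so two args of types FS and
   I64 yield 2 | (FS << 8) | (I64 << 11).  */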
9632 for (i = 0; i < 6; i++)
9633 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9634
9635 return GEN_INT (regval);
9636 }
9637 \f
9638 /* Make (or fake) .linkage entry for function call.
9639
9640 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9641
9642 Return a SYMBOL_REF rtx for the linkage. */
9643
9644 rtx
9645 alpha_need_linkage (const char *name, int is_local)
9646 {
9647 splay_tree_node node;
9648 struct alpha_links *al;
9649
9650 if (name[0] == '*')
9651 name++;
9652
9653 if (is_local)
9654 {
9655 struct alpha_funcs *cfaf;
9656
9657 if (!alpha_funcs_tree)
9658 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9659 splay_tree_compare_pointers);
9660
9661 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9662
9663 cfaf->links = 0;
9664 cfaf->num = ++alpha_funcs_num;
9665
9666 splay_tree_insert (alpha_funcs_tree,
9667 (splay_tree_key) current_function_decl,
9668 (splay_tree_value) cfaf);
9669 }
9670
9671 if (alpha_links_tree)
9672 {
9673 /* Is this name already defined? */
9674
9675 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9676 if (node)
9677 {
9678 al = (struct alpha_links *) node->value;
9679 if (is_local)
9680 {
9681 /* Defined here but external assumed. */
9682 if (al->lkind == KIND_EXTERN)
9683 al->lkind = KIND_LOCAL;
9684 }
9685 else
9686 {
9687 /* Used here but unused assumed. */
9688 if (al->lkind == KIND_UNUSED)
9689 al->lkind = KIND_LOCAL;
9690 }
9691 return al->linkage;
9692 }
9693 }
9694 else
9695 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9696
9697 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9698 name = ggc_strdup (name);
9699
9700 /* Assume external if no definition. */
9701 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9702
9703 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9704 get_identifier (name);
9705
9706 /* Construct a SYMBOL_REF for us to call. */
9707 {
9708 size_t name_len = strlen (name);
9709 char *linksym = XALLOCAVEC (char, name_len + 6);
9710 linksym[0] = '$';
9711 memcpy (linksym + 1, name, name_len);
9712 memcpy (linksym + 1 + name_len, "..lk", 5);
9713 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9714 ggc_alloc_string (linksym, name_len + 5));
9715 }
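/* For a NAME of "foo" (an illustrative value) the symbol built above
   is "$foo..lk"; the per-function variants created in
   alpha_use_linkage additionally embed the function number, as in
   "$1..foo..lk".  */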
9716
9717 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9718 (splay_tree_value) al);
9719
9720 return al->linkage;
9721 }
9722
9723 rtx
9724 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9725 {
9726 splay_tree_node cfunnode;
9727 struct alpha_funcs *cfaf;
9728 struct alpha_links *al;
9729 const char *name = XSTR (linkage, 0);
9730
9731 cfaf = (struct alpha_funcs *) 0;
9732 al = (struct alpha_links *) 0;
9733
9734 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9735 cfaf = (struct alpha_funcs *) cfunnode->value;
9736
9737 if (cfaf->links)
9738 {
9739 splay_tree_node lnode;
9740
9741 /* Is this name already defined? */
9742
9743 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9744 if (lnode)
9745 al = (struct alpha_links *) lnode->value;
9746 }
9747 else
9748 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9749
9750 if (!al)
9751 {
9752 size_t name_len;
9753 size_t buflen;
9754 char buf [512];
9755 char *linksym;
9756 splay_tree_node node = 0;
9757 struct alpha_links *anl;
9758
9759 if (name[0] == '*')
9760 name++;
9761
9762 name_len = strlen (name);
9763
9764 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9765 al->num = cfaf->num;
9766
9767 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9768 if (node)
9769 {
9770 anl = (struct alpha_links *) node->value;
9771 al->lkind = anl->lkind;
9772 }
9773
9774 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9775 buflen = strlen (buf);
9776 linksym = XALLOCAVEC (char, buflen + 1);
9777 memcpy (linksym, buf, buflen + 1);
9778
9779 al->linkage = gen_rtx_SYMBOL_REF
9780 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9781
9782 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9783 (splay_tree_value) al);
9784 }
9785
9786 if (rflag)
9787 al->rkind = KIND_CODEADDR;
9788 else
9789 al->rkind = KIND_LINKAGE;
9790
9791 if (lflag)
9792 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9793 else
9794 return al->linkage;
9795 }
9796
9797 static int
9798 alpha_write_one_linkage (splay_tree_node node, void *data)
9799 {
9800 const char *const name = (const char *) node->key;
9801 struct alpha_links *link = (struct alpha_links *) node->value;
9802 FILE *stream = (FILE *) data;
9803
9804 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9805 if (link->rkind == KIND_CODEADDR)
9806 {
9807 if (link->lkind == KIND_LOCAL)
9808 {
9809 /* Local and used. */
9810 fprintf (stream, "\t.quad %s..en\n", name);
9811 }
9812 else
9813 {
9814 /* External and used, request code address. */
9815 fprintf (stream, "\t.code_address %s\n", name);
9816 }
9817 }
9818 else
9819 {
9820 if (link->lkind == KIND_LOCAL)
9821 {
9822 /* Local and used, build linkage pair. */
9823 fprintf (stream, "\t.quad %s..en\n", name);
9824 fprintf (stream, "\t.quad %s\n", name);
9825 }
9826 else
9827 {
9828 /* External and used, request linkage pair. */
9829 fprintf (stream, "\t.linkage %s\n", name);
9830 }
9831 }
9832
9833 return 0;
9834 }
9835
9836 static void
9837 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9838 {
9839 splay_tree_node node;
9840 struct alpha_funcs *func;
9841
9842 fprintf (stream, "\t.link\n");
9843 fprintf (stream, "\t.align 3\n");
9844 in_section = NULL;
9845
9846 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9847 func = (struct alpha_funcs *) node->value;
9848
9849 fputs ("\t.name ", stream);
9850 assemble_name (stream, funname);
9851 fputs ("..na\n", stream);
9852 ASM_OUTPUT_LABEL (stream, funname);
9853 fprintf (stream, "\t.pdesc ");
9854 assemble_name (stream, funname);
9855 fprintf (stream, "..en,%s\n",
9856 alpha_procedure_type == PT_STACK ? "stack"
9857 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9858
9859 if (func->links)
9860 {
9861 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9862 /* splay_tree_delete (func->links); */
9863 }
9864 }
9865
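/* For orientation, a hedged sketch (not part of the original sources) of
   the linkage section that the two routines above emit, assuming a
   function "foo" that references one external routine "bar":

           .link
           .align 3
           .name foo..na
       foo:
           .pdesc foo..en,stack
       $1..bar..lk:
           .linkage bar

   The "$1..bar..lk" label is built by alpha_use_linkage (function number 1)
   and the ".linkage" request is what alpha_write_one_linkage prints for an
   external, non-code-address reference.  */
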
9866 /* Given a decl, a section name, and whether the decl initializer
9867 has relocs, choose attributes for the section. */
9868
9869 #define SECTION_VMS_OVERLAY SECTION_FORGET
9870 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9871 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9872
9873 static unsigned int
9874 vms_section_type_flags (tree decl, const char *name, int reloc)
9875 {
9876 unsigned int flags = default_section_type_flags (decl, name, reloc);
9877
9878 if (decl && DECL_ATTRIBUTES (decl)
9879 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9880 flags |= SECTION_VMS_OVERLAY;
9881 if (decl && DECL_ATTRIBUTES (decl)
9882 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9883 flags |= SECTION_VMS_GLOBAL;
9884 if (decl && DECL_ATTRIBUTES (decl)
9885 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9886 flags |= SECTION_VMS_INITIALIZE;
9887
9888 return flags;
9889 }
9890
9891 /* Switch to an arbitrary section NAME with attributes as specified
9892 by FLAGS. ALIGN specifies any known alignment requirements for
9893 the section; 0 if the default should be used. */
9894
9895 static void
9896 vms_asm_named_section (const char *name, unsigned int flags,
9897 tree decl ATTRIBUTE_UNUSED)
9898 {
9899 fputc ('\n', asm_out_file);
9900 fprintf (asm_out_file, ".section\t%s", name);
9901
9902 if (flags & SECTION_VMS_OVERLAY)
9903 fprintf (asm_out_file, ",OVR");
9904 if (flags & SECTION_VMS_GLOBAL)
9905 fprintf (asm_out_file, ",GBL");
9906 if (flags & SECTION_VMS_INITIALIZE)
9907 fprintf (asm_out_file, ",NOMOD");
9908 if (flags & SECTION_DEBUG)
9909 fprintf (asm_out_file, ",NOWRT");
9910
9911 fputc ('\n', asm_out_file);
9912 }
9913
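/* A minimal usage sketch (assumed, not part of this file): the declarations
   below are target-side user code.  A variable carrying one of these
   attributes from vms_attribute_table picks up the matching SECTION_VMS_*
   flag when its section flags are computed, and vms_asm_named_section then
   appends the corresponding qualifier to the .section directive.  */
#if 0
int ovr_data __attribute__ ((overlaid));     /* .section ...,OVR    */
int gbl_data __attribute__ ((global));       /* .section ...,GBL    */
int ini_data __attribute__ ((initialize));   /* .section ...,NOMOD  */
#endif
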
9914 /* Record an element in the table of global constructors. SYMBOL is
9915 a SYMBOL_REF of the function to be called; PRIORITY is a number
9916 between 0 and MAX_INIT_PRIORITY.
9917
9918 Differs from default_ctors_section_asm_out_constructor in that the
9919 width of the .ctors entry is always 64 bits, rather than the 32 bits
9920 used by a normal pointer. */
9921
9922 static void
9923 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9924 {
9925 switch_to_section (ctors_section);
9926 assemble_align (BITS_PER_WORD);
9927 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9928 }
9929
9930 static void
9931 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9932 {
9933 switch_to_section (dtors_section);
9934 assemble_align (BITS_PER_WORD);
9935 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9936 }
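
/* A hedged sketch (assumed, not part of the original sources): target-side
   user code such as

       static void __attribute__ ((constructor))
       init_hook (void)
       {
       }

   would be recorded by vms_asm_out_constructor above as a full 64-bit
   entry, roughly

       .align 3
       .quad init_hook

   in the constructor section, instead of the 32-bit entry that the generic
   default_ctors_section_asm_out_constructor would emit.  */
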
9937 #else
9938
9939 rtx
9940 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9941 int is_local ATTRIBUTE_UNUSED)
9942 {
9943 return NULL_RTX;
9944 }
9945
9946 rtx
9947 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9948 tree cfundecl ATTRIBUTE_UNUSED,
9949 int lflag ATTRIBUTE_UNUSED,
9950 int rflag ATTRIBUTE_UNUSED)
9951 {
9952 return NULL_RTX;
9953 }
9954
9955 #endif /* TARGET_ABI_OPEN_VMS */
9956 \f
9957 #if TARGET_ABI_UNICOSMK
9958
9959 /* This evaluates to true if we do not know how to pass TYPE solely in
9960 registers. This is the case for all arguments that do not fit in two
9961 registers. */
9962
9963 static bool
9964 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9965 {
9966 if (type == NULL)
9967 return false;
9968
9969 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9970 return true;
9971 if (TREE_ADDRESSABLE (type))
9972 return true;
9973
9974 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9975 }
9976
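/* Two illustrative target-side aggregates (assumed, not part of the
   original sources), with ALPHA_ARG_SIZE counting 8-byte argument words:  */
#if 0
struct two_words   { long a, b;    };   /* 2 words: may stay in registers  */
struct three_words { long a, b, c; };   /* 3 words: forced onto the stack
                                            by the check above             */
#endif
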
9977 /* Define the offset between two registers, one to be eliminated, and the
9978 other its replacement, at the start of a routine. */
9979
9980 int
9981 unicosmk_initial_elimination_offset (int from, int to)
9982 {
9983 int fixed_size;
9984
9985 fixed_size = alpha_sa_size();
9986 if (fixed_size != 0)
9987 fixed_size += 48;
9988
9989 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9990 return -fixed_size;
9991 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9992 return 0;
9993 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9994 return (ALPHA_ROUND (crtl->outgoing_args_size)
9995 + ALPHA_ROUND (get_frame_size()));
9996 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9997 return (ALPHA_ROUND (fixed_size)
9998 + ALPHA_ROUND (get_frame_size()
9999 + crtl->outgoing_args_size));
10000 else
10001 gcc_unreachable ();
10002 }
10003
10004 /* Output the module name for .ident and .end directives. We have to strip
10005 directories and make sure that the module name starts with a letter
10006 or '$'. */
10007
10008 static void
10009 unicosmk_output_module_name (FILE *file)
10010 {
10011 const char *name = lbasename (main_input_filename);
10012 unsigned len = strlen (name);
10013 char *clean_name = alloca (len + 2);
10014 char *ptr = clean_name;
10015
10016 /* CAM only accepts module names that start with a letter or '$'. We
10017 prefix the module name with a '$' if necessary. */
10018
10019 if (!ISALPHA (*name))
10020 *ptr++ = '$';
10021 memcpy (ptr, name, len + 1);
10022 clean_symbol_name (clean_name);
10023 fputs (clean_name, file);
10024 }
10025
10026 /* Output the definition of a common variable. */
10027
10028 void
10029 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10030 {
10031 tree name_tree;
10032 printf ("T3E__: common %s\n", name);
10033
10034 in_section = NULL;
10035 fputs ("\t.endp\n\n\t.psect ", file);
10036 assemble_name (file, name);
10037 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10038 fprintf (file, "\t.byte\t0:%d\n", size);
10039
10040 /* Mark the symbol as defined in this module. */
10041 name_tree = get_identifier (name);
10042 TREE_ASM_WRITTEN (name_tree) = 1;
10043 }
10044
10045 #define SECTION_PUBLIC SECTION_MACH_DEP
10046 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10047 static int current_section_align;
10048
10049 /* A get_unnamed_section callback for switching to the text section. */
10050
10051 static void
10052 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10053 {
10054 static int count = 0;
10055 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10056 }
10057
10058 /* A get_unnamed_section callback for switching to the data section. */
10059
10060 static void
10061 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10062 {
10063 static int count = 1;
10064 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10065 }
10066
10067 /* Implement TARGET_ASM_INIT_SECTIONS.
10068
10069 The Cray assembler is really weird with respect to sections. It has only
10070 named sections and you can't reopen a section once it has been closed.
10071 This means that we have to generate unique names whenever we want to
10072 reenter the text or the data section. */
10073
10074 static void
10075 unicosmk_init_sections (void)
10076 {
10077 text_section = get_unnamed_section (SECTION_CODE,
10078 unicosmk_output_text_section_asm_op,
10079 NULL);
10080 data_section = get_unnamed_section (SECTION_WRITE,
10081 unicosmk_output_data_section_asm_op,
10082 NULL);
10083 readonly_data_section = data_section;
10084 }
10085
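/* For illustration (derived from the callbacks above): every return to the
   text or the data section opens a brand-new psect, for example

       .endp

       .psect  gcc@text___0,code
       ...
       .endp

       .psect  gcc@data___1,data

   with the static counters supplying the unique numeric suffixes.  */
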
10086 static unsigned int
10087 unicosmk_section_type_flags (tree decl, const char *name,
10088 int reloc ATTRIBUTE_UNUSED)
10089 {
10090 unsigned int flags = default_section_type_flags (decl, name, reloc);
10091
10092 if (!decl)
10093 return flags;
10094
10095 if (TREE_CODE (decl) == FUNCTION_DECL)
10096 {
10097 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10098 if (align_functions_log > current_section_align)
10099 current_section_align = align_functions_log;
10100
10101 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10102 flags |= SECTION_MAIN;
10103 }
10104 else
10105 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10106
10107 if (TREE_PUBLIC (decl))
10108 flags |= SECTION_PUBLIC;
10109
10110 return flags;
10111 }
10112
10113 /* Generate a section name for decl and associate it with the
10114 declaration. */
10115
10116 static void
10117 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10118 {
10119 const char *name;
10120 int len;
10121
10122 gcc_assert (decl);
10123
10124 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10125 name = default_strip_name_encoding (name);
10126 len = strlen (name);
10127
10128 if (TREE_CODE (decl) == FUNCTION_DECL)
10129 {
10130 char *string;
10131
10132 /* It is essential that we prefix the section name here because
10133 otherwise the section names generated for constructors and
10134 destructors confuse collect2. */
10135
10136 string = alloca (len + 6);
10137 sprintf (string, "code@%s", name);
10138 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10139 }
10140 else if (TREE_PUBLIC (decl))
10141 DECL_SECTION_NAME (decl) = build_string (len, name);
10142 else
10143 {
10144 char *string;
10145
10146 string = alloca (len + 6);
10147 sprintf (string, "data@%s", name);
10148 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10149 }
10150 }
10151
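/* Illustrative section names (assumed; the declarations are target-side
   user code shown for orientation only):  */
#if 0
void foo (void);      /* FUNCTION_DECL                 -> psect "code@foo"      */
extern int pub_var;   /* public data                   -> psect "pub_var"       */
static int priv_var;  /* other decls needing a unique
                         section                       -> psect "data@priv_var" */
#endif
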
10152 /* Switch to an arbitrary section NAME with attributes as specified
10153 by FLAGS. ALIGN specifies any known alignment requirements for
10154 the section; 0 if the default should be used. */
10155
10156 static void
10157 unicosmk_asm_named_section (const char *name, unsigned int flags,
10158 tree decl ATTRIBUTE_UNUSED)
10159 {
10160 const char *kind;
10161
10162 /* Close the previous section. */
10163
10164 fputs ("\t.endp\n\n", asm_out_file);
10165
10166 /* Find out what kind of section we are opening. */
10167
10168 if (flags & SECTION_MAIN)
10169 fputs ("\t.start\tmain\n", asm_out_file);
10170
10171 if (flags & SECTION_CODE)
10172 kind = "code";
10173 else if (flags & SECTION_PUBLIC)
10174 kind = "common";
10175 else
10176 kind = "data";
10177
10178 if (current_section_align != 0)
10179 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10180 current_section_align, kind);
10181 else
10182 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10183 }
10184
10185 static void
10186 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10187 {
10188 if (DECL_P (decl)
10189 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10190 unicosmk_unique_section (decl, 0);
10191 }
10192
10193 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10194 in code sections because .align fills unused space with zeroes. */
10195
10196 void
10197 unicosmk_output_align (FILE *file, int align)
10198 {
10199 if (inside_function)
10200 fprintf (file, "\tgcc@code@align\t%d\n", align);
10201 else
10202 fprintf (file, "\t.align\t%d\n", align);
10203 }
10204
10205 /* Add a case vector to the current function's list of deferred case
10206 vectors. Case vectors have to be put into a separate section because CAM
10207 does not allow data definitions in code sections. */
10208
10209 void
10210 unicosmk_defer_case_vector (rtx lab, rtx vec)
10211 {
10212 struct machine_function *machine = cfun->machine;
10213
10214 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10215 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10216 machine->addr_list);
10217 }
10218
10219 /* Output a case vector. */
10220
10221 static void
10222 unicosmk_output_addr_vec (FILE *file, rtx vec)
10223 {
10224 rtx lab = XEXP (vec, 0);
10225 rtx body = XEXP (vec, 1);
10226 int vlen = XVECLEN (body, 0);
10227 int idx;
10228
10229 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10230
10231 for (idx = 0; idx < vlen; idx++)
10232 {
10233 ASM_OUTPUT_ADDR_VEC_ELT
10234 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10235 }
10236 }
10237
10238 /* Output current function's deferred case vectors. */
10239
10240 static void
10241 unicosmk_output_deferred_case_vectors (FILE *file)
10242 {
10243 struct machine_function *machine = cfun->machine;
10244 rtx t;
10245
10246 if (machine->addr_list == NULL_RTX)
10247 return;
10248
10249 switch_to_section (data_section);
10250 for (t = machine->addr_list; t; t = XEXP (t, 1))
10251 unicosmk_output_addr_vec (file, XEXP (t, 0));
10252 }
10253
10254 /* Generate the name of the SSIB section for the current function. */
10255
10256 #define SSIB_PREFIX "__SSIB_"
10257 #define SSIB_PREFIX_LEN 7
10258
10259 static const char *
10260 unicosmk_ssib_name (void)
10261 {
10262 /* A 256-byte buffer is enough since CAM cannot handle names longer than
10263 that anyway. */
10264
10265 static char name[256];
10266
10267 rtx x;
10268 const char *fnname;
10269 int len;
10270
10271 x = DECL_RTL (cfun->decl);
10272 gcc_assert (MEM_P (x));
10273 x = XEXP (x, 0);
10274 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10275 fnname = XSTR (x, 0);
10276
10277 len = strlen (fnname);
10278 if (len + SSIB_PREFIX_LEN > 255)
10279 len = 255 - SSIB_PREFIX_LEN;
10280
10281 strcpy (name, SSIB_PREFIX);
10282 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10283 name[len + SSIB_PREFIX_LEN] = 0;
10284
10285 return name;
10286 }
10287
10288 /* Set up the dynamic subprogram information block (DSIB) and update the
10289 frame pointer register ($15) for subroutines which have a frame. If the
10290 subroutine doesn't have a frame, simply increment $15. */
10291
10292 static void
10293 unicosmk_gen_dsib (unsigned long *imaskP)
10294 {
10295 if (alpha_procedure_type == PT_STACK)
10296 {
10297 const char *ssib_name;
10298 rtx mem;
10299
10300 /* Allocate 64 bytes for the DSIB. */
10301
10302 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10303 GEN_INT (-64))));
10304 emit_insn (gen_blockage ());
10305
10306 /* Save the return address. */
10307
10308 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10309 set_mem_alias_set (mem, alpha_sr_alias_set);
10310 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10311 (*imaskP) &= ~(1UL << REG_RA);
10312
10313 /* Save the old frame pointer. */
10314
10315 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10316 set_mem_alias_set (mem, alpha_sr_alias_set);
10317 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10318 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10319
10320 emit_insn (gen_blockage ());
10321
10322 /* Store the SSIB pointer. */
10323
10324 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10325 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10326 set_mem_alias_set (mem, alpha_sr_alias_set);
10327
10328 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10329 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10330 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10331
10332 /* Save the CIW index. */
10333
10334 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10335 set_mem_alias_set (mem, alpha_sr_alias_set);
10336 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10337
10338 emit_insn (gen_blockage ());
10339
10340 /* Set the new frame pointer. */
10341 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10342 stack_pointer_rtx, GEN_INT (64))));
10343 }
10344 else
10345 {
10346 /* Increment the frame pointer register to indicate that we do not
10347 have a frame. */
10348 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10349 hard_frame_pointer_rtx, const1_rtx));
10350 }
10351 }
10352
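/* A layout sketch of the 64-byte DSIB built above (assumed; offsets are
   relative to the decremented stack pointer, and slots not written here
   are left untouched):  */
#if 0
struct unicosmk_dsib_sketch           /* hypothetical, illustration only */
{
  long unused0[3];                    /* sp+0  .. sp+23                  */
  long ciw_index;                     /* sp+24: copy of $25              */
  long ssib_pointer;                  /* sp+32: address of the SSIB      */
  long unused1;                       /* sp+40                           */
  long old_frame_pointer;             /* sp+48: previous $15             */
  long return_address;                /* sp+56: $26                      */
};                                    /* $15 then points at sp+64        */
#endif
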
10353 /* Output the static subroutine information block for the current
10354 function. */
10355
10356 static void
10357 unicosmk_output_ssib (FILE *file, const char *fnname)
10358 {
10359 int len;
10360 int i;
10361 rtx x;
10362 rtx ciw;
10363 struct machine_function *machine = cfun->machine;
10364
10365 in_section = NULL;
10366 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10367 unicosmk_ssib_name ());
10368
10369 /* A required header quad which also encodes the function name length. */
10370
10371 len = strlen (fnname);
10372 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10373
10374 /* Saved registers
10375 ??? We don't do that yet. */
10376
10377 fputs ("\t.quad\t0\n", file);
10378
10379 /* Function address. */
10380
10381 fputs ("\t.quad\t", file);
10382 assemble_name (file, fnname);
10383 putc ('\n', file);
10384
10385 fputs ("\t.quad\t0\n", file);
10386 fputs ("\t.quad\t0\n", file);
10387
10388 /* Function name.
10389 ??? We do it the same way Cray CC does it but this could be
10390 simplified. */
10391
10392 for (i = 0; i < len; i++)
10393 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10394 if ((len % 8) == 0)
10395 fputs ("\t.quad\t0\n", file);
10396 else
10397 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10398
10399 /* All call information words used in the function. */
10400
10401 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10402 {
10403 ciw = XEXP (x, 0);
10404 #if HOST_BITS_PER_WIDE_INT == 32
10405 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10406 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10407 #else
10408 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10409 #endif
10410 }
10411 }
10412
10413 /* Add a call information word (CIW) to the list of the current function's
10414 CIWs and return its index.
10415
10416 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10417
10418 rtx
10419 unicosmk_add_call_info_word (rtx x)
10420 {
10421 rtx node;
10422 struct machine_function *machine = cfun->machine;
10423
10424 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10425 if (machine->first_ciw == NULL_RTX)
10426 machine->first_ciw = node;
10427 else
10428 XEXP (machine->last_ciw, 1) = node;
10429
10430 machine->last_ciw = node;
10431 ++machine->ciw_count;
10432
10433 return GEN_INT (machine->ciw_count
10434 + strlen (current_function_name ())/8 + 5);
10435 }
10436
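/* A worked example (assumed): for a function named "main" (strlen == 4),
   unicosmk_output_ssib emits five leading quads (the header, the saved
   register mask, the function address and two zero quads) followed by one
   quad holding the padded name, so the first CIW lands at quad offset 6 of
   the SSIB.  The first call to unicosmk_add_call_info_word returns
   1 + 4/8 + 5 == 6, matching that offset, and each further CIW gets the
   next index.  */
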
10437 /* The Cray assembler doesn't accept extern declarations for symbols which
10438 are defined in the same file. We have to keep track of all global
10439 symbols which are referenced and/or defined in a source file and, at the
10440 end of the file, output extern declarations for those which are referenced
10441 but not defined. */
10442
10443 /* List of identifiers for which an extern declaration might have to be
10444 emitted. */
10445 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10446
10447 struct unicosmk_extern_list
10448 {
10449 struct unicosmk_extern_list *next;
10450 const char *name;
10451 };
10452
10453 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10454
10455 /* Output extern declarations which are required for every asm file. */
10456
10457 static void
10458 unicosmk_output_default_externs (FILE *file)
10459 {
10460 static const char *const externs[] =
10461 { "__T3E_MISMATCH" };
10462
10463 int i;
10464 int n;
10465
10466 n = ARRAY_SIZE (externs);
10467
10468 for (i = 0; i < n; i++)
10469 fprintf (file, "\t.extern\t%s\n", externs[i]);
10470 }
10471
10472 /* Output extern declarations for global symbols which have been
10473 referenced but not defined. */
10474
10475 static void
10476 unicosmk_output_externs (FILE *file)
10477 {
10478 struct unicosmk_extern_list *p;
10479 const char *real_name;
10480 int len;
10481 tree name_tree;
10482
10483 len = strlen (user_label_prefix);
10484 for (p = unicosmk_extern_head; p != 0; p = p->next)
10485 {
10486 /* We have to strip the encoding and possibly remove user_label_prefix
10487 from the identifier in order to handle -fleading-underscore and
10488 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10489 real_name = default_strip_name_encoding (p->name);
10490 if (len && p->name[0] == '*'
10491 && !memcmp (real_name, user_label_prefix, len))
10492 real_name += len;
10493
10494 name_tree = get_identifier (real_name);
10495 if (! TREE_ASM_WRITTEN (name_tree))
10496 {
10497 TREE_ASM_WRITTEN (name_tree) = 1;
10498 fputs ("\t.extern\t", file);
10499 assemble_name (file, p->name);
10500 putc ('\n', file);
10501 }
10502 }
10503 }
10504
10505 /* Record an extern. */
10506
10507 void
10508 unicosmk_add_extern (const char *name)
10509 {
10510 struct unicosmk_extern_list *p;
10511
10512 p = (struct unicosmk_extern_list *)
10513 xmalloc (sizeof (struct unicosmk_extern_list));
10514 p->next = unicosmk_extern_head;
10515 p->name = name;
10516 unicosmk_extern_head = p;
10517 }
10518
10519 /* The Cray assembler generates incorrect code if identifiers which
10520 conflict with register names are used as instruction operands. We have
10521 to replace such identifiers with DEX expressions. */
10522
10523 /* Structure to collect identifiers which have been replaced by DEX
10524 expressions. */
10525 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10526
10527 struct unicosmk_dex {
10528 struct unicosmk_dex *next;
10529 const char *name;
10530 };
10531
10532 /* List of identifiers which have been replaced by DEX expressions. The DEX
10533 number is determined by the position in the list. */
10534
10535 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10536
10537 /* The number of elements in the DEX list. */
10538
10539 static int unicosmk_dex_count = 0;
10540
10541 /* Check if NAME must be replaced by a DEX expression. */
10542
10543 static int
10544 unicosmk_special_name (const char *name)
10545 {
10546 if (name[0] == '*')
10547 ++name;
10548
10549 if (name[0] == '$')
10550 ++name;
10551
10552 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10553 return 0;
10554
10555 switch (name[1])
10556 {
10557 case '1': case '2':
10558 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10559
10560 case '3':
10561 return (name[2] == '\0'
10562 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10563
10564 default:
10565 return (ISDIGIT (name[1]) && name[2] == '\0');
10566 }
10567 }
10568
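/* Illustrative target-side symbols (assumed, for orientation only):  */
#if 0
long r5;      /* matches a register spelling (r0..r31): DEX replacement  */
double f31;   /* likewise for the floating registers f0..f31             */
long result;  /* starts with 'r' but is not a register name: left alone  */
long r32;     /* out of range: left alone                                */
#endif
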
10569 /* Return the DEX number if X must be replaced by a DEX expression and 0
10570 otherwise. */
10571
10572 static int
10573 unicosmk_need_dex (rtx x)
10574 {
10575 struct unicosmk_dex *dex;
10576 const char *name;
10577 int i;
10578
10579 if (GET_CODE (x) != SYMBOL_REF)
10580 return 0;
10581
10582 name = XSTR (x, 0);
10583 if (! unicosmk_special_name (name))
10584 return 0;
10585
10586 i = unicosmk_dex_count;
10587 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10588 {
10589 if (! strcmp (name, dex->name))
10590 return i;
10591 --i;
10592 }
10593
10594 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10595 dex->name = name;
10596 dex->next = unicosmk_dex_list;
10597 unicosmk_dex_list = dex;
10598
10599 ++unicosmk_dex_count;
10600 return unicosmk_dex_count;
10601 }
10602
10603 /* Output the DEX definitions for this file. */
10604
10605 static void
10606 unicosmk_output_dex (FILE *file)
10607 {
10608 struct unicosmk_dex *dex;
10609 int i;
10610
10611 if (unicosmk_dex_list == NULL)
10612 return;
10613
10614 fprintf (file, "\t.dexstart\n");
10615
10616 i = unicosmk_dex_count;
10617 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10618 {
10619 fprintf (file, "\tDEX (%d) = ", i);
10620 assemble_name (file, dex->name);
10621 putc ('\n', file);
10622 --i;
10623 }
10624
10625 fprintf (file, "\t.dexend\n");
10626 }
10627
10628 /* Output text to appear at the beginning of an assembler file. */
10629
10630 static void
10631 unicosmk_file_start (void)
10632 {
10633 int i;
10634
10635 fputs ("\t.ident\t", asm_out_file);
10636 unicosmk_output_module_name (asm_out_file);
10637 fputs ("\n\n", asm_out_file);
10638
10639 /* The Unicos/Mk assembler uses different register names. Instead of trying
10640 to support them, we simply use micro definitions. */
10641
10642 /* CAM has different register names: rN for the integer register N and fN
10643 for the floating-point register N. Instead of trying to use these in
10644 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10645 register. */
10646
10647 for (i = 0; i < 32; ++i)
10648 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10649
10650 for (i = 0; i < 32; ++i)
10651 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10652
10653 putc ('\n', asm_out_file);
10654
10655 /* The .align directive fills unused space with zeroes, which does not work
10656 in code sections. We define the macro 'gcc@code@align' which uses nops
10657 instead. Note that it assumes that code sections always have the
10658 biggest possible alignment since . refers to the current offset from
10659 the beginning of the section. */
10660
10661 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10662 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10663 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10664 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10665 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10666 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10667 fputs ("\t.endr\n", asm_out_file);
10668 fputs ("\t.endif\n", asm_out_file);
10669 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10670
10671 /* Output extern declarations which should always be visible. */
10672 unicosmk_output_default_externs (asm_out_file);
10673
10674 /* Open a dummy section. We always need to be inside a section for the
10675 section-switching code to work correctly.
10676 ??? This should be a module id or something like that. I still have to
10677 figure out what the rules for those are. */
10678 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10679 }
10680
10681 /* Output text to appear at the end of an assembler file. This includes all
10682 pending extern declarations and DEX expressions. */
10683
10684 static void
10685 unicosmk_file_end (void)
10686 {
10687 fputs ("\t.endp\n\n", asm_out_file);
10688
10689 /* Output all pending externs. */
10690
10691 unicosmk_output_externs (asm_out_file);
10692
10693 /* Output dex definitions used for functions whose names conflict with
10694 register names. */
10695
10696 unicosmk_output_dex (asm_out_file);
10697
10698 fputs ("\t.end\t", asm_out_file);
10699 unicosmk_output_module_name (asm_out_file);
10700 putc ('\n', asm_out_file);
10701 }
10702
10703 #else
10704
10705 static void
10706 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10707 {}
10708
10709 static void
10710 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10711 {}
10712
10713 static void
10714 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10715 const char * fnname ATTRIBUTE_UNUSED)
10716 {}
10717
10718 rtx
10719 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10720 {
10721 return NULL_RTX;
10722 }
10723
10724 static int
10725 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10726 {
10727 return 0;
10728 }
10729
10730 #endif /* TARGET_ABI_UNICOSMK */
10731
10732 static void
10733 alpha_init_libfuncs (void)
10734 {
10735 if (TARGET_ABI_UNICOSMK)
10736 {
10737 /* Prevent gcc from generating calls to __divsi3. */
10738 set_optab_libfunc (sdiv_optab, SImode, 0);
10739 set_optab_libfunc (udiv_optab, SImode, 0);
10740
10741 /* Use the functions provided by the system library
10742 for DImode integer division. */
10743 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10744 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10745 }
10746 else if (TARGET_ABI_OPEN_VMS)
10747 {
10748 /* Use the VMS runtime library functions for division and
10749 remainder. */
10750 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10751 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10752 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10753 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10754 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10755 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10756 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10757 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10758 }
10759 }
10760
10761 \f
10762 /* Initialize the GCC target structure. */
10763 #if TARGET_ABI_OPEN_VMS
10764 # undef TARGET_ATTRIBUTE_TABLE
10765 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10766 # undef TARGET_SECTION_TYPE_FLAGS
10767 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10768 #endif
10769
10770 #undef TARGET_IN_SMALL_DATA_P
10771 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10772
10773 #if TARGET_ABI_UNICOSMK
10774 # undef TARGET_INSERT_ATTRIBUTES
10775 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10776 # undef TARGET_SECTION_TYPE_FLAGS
10777 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10778 # undef TARGET_ASM_UNIQUE_SECTION
10779 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10780 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10781 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10782 # undef TARGET_ASM_GLOBALIZE_LABEL
10783 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10784 # undef TARGET_MUST_PASS_IN_STACK
10785 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10786 #endif
10787
10788 #undef TARGET_ASM_ALIGNED_HI_OP
10789 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10790 #undef TARGET_ASM_ALIGNED_DI_OP
10791 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10792
10793 /* Default unaligned ops are provided for ELF systems. To get unaligned
10794 data for non-ELF systems, we have to turn off auto alignment. */
10795 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
10796 #undef TARGET_ASM_UNALIGNED_HI_OP
10797 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10798 #undef TARGET_ASM_UNALIGNED_SI_OP
10799 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10800 #undef TARGET_ASM_UNALIGNED_DI_OP
10801 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10802 #endif
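
/* For illustration (assumed): with the definitions above an unaligned
   32-bit datum is emitted as

       .align 0
       .long sym

   so the non-ELF assembler does not insert padding in front of it.  */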
10803
10804 #ifdef OBJECT_FORMAT_ELF
10805 #undef TARGET_ASM_RELOC_RW_MASK
10806 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10807 #undef TARGET_ASM_SELECT_RTX_SECTION
10808 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10809 #undef TARGET_SECTION_TYPE_FLAGS
10810 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10811 #endif
10812
10813 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10814 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10815
10816 #undef TARGET_INIT_LIBFUNCS
10817 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10818
10819 #undef TARGET_LEGITIMIZE_ADDRESS
10820 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
10821
10822 #if TARGET_ABI_UNICOSMK
10823 #undef TARGET_ASM_FILE_START
10824 #define TARGET_ASM_FILE_START unicosmk_file_start
10825 #undef TARGET_ASM_FILE_END
10826 #define TARGET_ASM_FILE_END unicosmk_file_end
10827 #else
10828 #undef TARGET_ASM_FILE_START
10829 #define TARGET_ASM_FILE_START alpha_file_start
10830 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10831 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10832 #endif
10833
10834 #undef TARGET_SCHED_ADJUST_COST
10835 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10836 #undef TARGET_SCHED_ISSUE_RATE
10837 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10838 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10839 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10840 alpha_multipass_dfa_lookahead
10841
10842 #undef TARGET_HAVE_TLS
10843 #define TARGET_HAVE_TLS HAVE_AS_TLS
10844
10845 #undef TARGET_INIT_BUILTINS
10846 #define TARGET_INIT_BUILTINS alpha_init_builtins
10847 #undef TARGET_EXPAND_BUILTIN
10848 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10849 #undef TARGET_FOLD_BUILTIN
10850 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10851
10852 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10853 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10854 #undef TARGET_CANNOT_COPY_INSN_P
10855 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10856 #undef TARGET_CANNOT_FORCE_CONST_MEM
10857 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10858
10859 #if TARGET_ABI_OSF
10860 #undef TARGET_ASM_OUTPUT_MI_THUNK
10861 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10862 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10863 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10864 #undef TARGET_STDARG_OPTIMIZE_HOOK
10865 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10866 #endif
10867
10868 #undef TARGET_RTX_COSTS
10869 #define TARGET_RTX_COSTS alpha_rtx_costs
10870 #undef TARGET_ADDRESS_COST
10871 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
10872
10873 #undef TARGET_MACHINE_DEPENDENT_REORG
10874 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10875
10876 #undef TARGET_PROMOTE_FUNCTION_MODE
10877 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
10878 #undef TARGET_PROMOTE_PROTOTYPES
10879 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10880 #undef TARGET_RETURN_IN_MEMORY
10881 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10882 #undef TARGET_PASS_BY_REFERENCE
10883 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10884 #undef TARGET_SETUP_INCOMING_VARARGS
10885 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10886 #undef TARGET_STRICT_ARGUMENT_NAMING
10887 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10888 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10889 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10890 #undef TARGET_SPLIT_COMPLEX_ARG
10891 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10892 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10893 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10894 #undef TARGET_ARG_PARTIAL_BYTES
10895 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10896
10897 #undef TARGET_SECONDARY_RELOAD
10898 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10899
10900 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10901 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10902 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10903 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10904
10905 #undef TARGET_BUILD_BUILTIN_VA_LIST
10906 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10907
10908 #undef TARGET_EXPAND_BUILTIN_VA_START
10909 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10910
10911 /* The Alpha architecture does not require sequential consistency. See
10912 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10913 for an example of how it can be violated in practice. */
10914 #undef TARGET_RELAXED_ORDERING
10915 #define TARGET_RELAXED_ORDERING true
10916
10917 #undef TARGET_DEFAULT_TARGET_FLAGS
10918 #define TARGET_DEFAULT_TARGET_FLAGS \
10919 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10920 #undef TARGET_HANDLE_OPTION
10921 #define TARGET_HANDLE_OPTION alpha_handle_option
10922
10923 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10924 #undef TARGET_MANGLE_TYPE
10925 #define TARGET_MANGLE_TYPE alpha_mangle_type
10926 #endif
10927
10928 #undef TARGET_LEGITIMATE_ADDRESS_P
10929 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10930
10931 struct gcc_target targetm = TARGET_INITIALIZER;
10932
10933 \f
10934 #include "gt-alpha.h"