re PR target/28623 (ICE in extract_insn, at recog.c:2077 (unrecognizable insn) [alpha])
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to
20 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "tree-gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59
60
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
63
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
66
67 static const char * const alpha_cpu_name[] =
68 {
69 "ev4", "ev5", "ev6"
70 };
71
72 /* Specify how accurate floating-point traps need to be. */
73
74 enum alpha_trap_precision alpha_tp;
75
76 /* Specify the floating-point rounding mode. */
77
78 enum alpha_fp_rounding_mode alpha_fprm;
79
80 /* Specify which things cause traps. */
81
82 enum alpha_fp_trap_mode alpha_fptm;
83
84 /* Save information from a "cmpxx" operation until the branch or scc is
85 emitted. */
86
87 struct alpha_compare alpha_compare;
88
89 /* Nonzero if inside a function, because the Alpha assembler can't
90 handle .file directives inside functions. */
91
92 static int inside_function = FALSE;
93
94 /* The number of cycles of latency we should assume on memory reads. */
95
96 int alpha_memory_latency = 3;
97
98 /* Whether the function needs the GP. */
99
100 static int alpha_function_needs_gp;
101
102 /* The alias set for prologue/epilogue register save/restore. */
103
104 static GTY(()) int alpha_sr_alias_set;
105
106 /* The assembler name of the current function. */
107
108 static const char *alpha_fnname;
109
110 /* The next explicit relocation sequence number. */
111 extern GTY(()) int alpha_next_sequence_number;
112 int alpha_next_sequence_number = 1;
113
114 /* The literal and gpdisp sequence numbers for this insn, as printed
115 by %# and %* respectively. */
116 extern GTY(()) int alpha_this_literal_sequence_number;
117 extern GTY(()) int alpha_this_gpdisp_sequence_number;
118 int alpha_this_literal_sequence_number;
119 int alpha_this_gpdisp_sequence_number;
120
121 /* Costs of various operations on the different architectures. */
122
123 struct alpha_rtx_cost_data
124 {
125 unsigned char fp_add;
126 unsigned char fp_mult;
127 unsigned char fp_div_sf;
128 unsigned char fp_div_df;
129 unsigned char int_mult_si;
130 unsigned char int_mult_di;
131 unsigned char int_shift;
132 unsigned char int_cmov;
133 unsigned short int_div;
134 };
135
136 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
137 {
138 { /* EV4 */
139 COSTS_N_INSNS (6), /* fp_add */
140 COSTS_N_INSNS (6), /* fp_mult */
141 COSTS_N_INSNS (34), /* fp_div_sf */
142 COSTS_N_INSNS (63), /* fp_div_df */
143 COSTS_N_INSNS (23), /* int_mult_si */
144 COSTS_N_INSNS (23), /* int_mult_di */
145 COSTS_N_INSNS (2), /* int_shift */
146 COSTS_N_INSNS (2), /* int_cmov */
147 COSTS_N_INSNS (97), /* int_div */
148 },
149 { /* EV5 */
150 COSTS_N_INSNS (4), /* fp_add */
151 COSTS_N_INSNS (4), /* fp_mult */
152 COSTS_N_INSNS (15), /* fp_div_sf */
153 COSTS_N_INSNS (22), /* fp_div_df */
154 COSTS_N_INSNS (8), /* int_mult_si */
155 COSTS_N_INSNS (12), /* int_mult_di */
156 COSTS_N_INSNS (1) + 1, /* int_shift */
157 COSTS_N_INSNS (1), /* int_cmov */
158 COSTS_N_INSNS (83), /* int_div */
159 },
160 { /* EV6 */
161 COSTS_N_INSNS (4), /* fp_add */
162 COSTS_N_INSNS (4), /* fp_mult */
163 COSTS_N_INSNS (12), /* fp_div_sf */
164 COSTS_N_INSNS (15), /* fp_div_df */
165 COSTS_N_INSNS (7), /* int_mult_si */
166 COSTS_N_INSNS (7), /* int_mult_di */
167 COSTS_N_INSNS (1), /* int_shift */
168 COSTS_N_INSNS (2), /* int_cmov */
169 COSTS_N_INSNS (86), /* int_div */
170 },
171 };
172
173 /* Similar but tuned for code size instead of execution latency. The
174 extra +N is fractional cost tuning based on latency. It's used to
175 encourage use of cheaper insns like shift, but only if there's just
176 one of them. */
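/* For instance, int_shift is COSTS_N_INSNS (1) while int_mult_si is
   COSTS_N_INSNS (1) + 1 below, so a single shift is rated cheaper than a
   multiply, but a two-shift sequence is not.  */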
177
178 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
179 {
180 COSTS_N_INSNS (1), /* fp_add */
181 COSTS_N_INSNS (1), /* fp_mult */
182 COSTS_N_INSNS (1), /* fp_div_sf */
183 COSTS_N_INSNS (1) + 1, /* fp_div_df */
184 COSTS_N_INSNS (1) + 1, /* int_mult_si */
185 COSTS_N_INSNS (1) + 2, /* int_mult_di */
186 COSTS_N_INSNS (1), /* int_shift */
187 COSTS_N_INSNS (1), /* int_cmov */
188 COSTS_N_INSNS (6), /* int_div */
189 };
190
191 /* Get the number of args of a function in one of two ways. */
192 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
193 #define NUM_ARGS current_function_args_info.num_args
194 #else
195 #define NUM_ARGS current_function_args_info
196 #endif
197
198 #define REG_PV 27
199 #define REG_RA 26
200
201 /* Declarations of static functions. */
202 static struct machine_function *alpha_init_machine_status (void);
203 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
204
205 #if TARGET_ABI_OPEN_VMS
206 static void alpha_write_linkage (FILE *, const char *, tree);
207 #endif
208
209 static void unicosmk_output_deferred_case_vectors (FILE *);
210 static void unicosmk_gen_dsib (unsigned long *);
211 static void unicosmk_output_ssib (FILE *, const char *);
212 static int unicosmk_need_dex (rtx);
213 \f
214 /* Implement TARGET_HANDLE_OPTION. */
215
216 static bool
217 alpha_handle_option (size_t code, const char *arg, int value)
218 {
219 switch (code)
220 {
221 case OPT_mfp_regs:
222 if (value == 0)
223 target_flags |= MASK_SOFT_FP;
224 break;
225
226 case OPT_mieee:
227 case OPT_mieee_with_inexact:
228 target_flags |= MASK_IEEE_CONFORMANT;
229 break;
230
231 case OPT_mtls_size_:
232 if (value != 16 && value != 32 && value != 64)
233 error ("bad value %qs for -mtls-size switch", arg);
234 break;
235 }
236
237 return true;
238 }
239
240 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
241 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
242
243 static const char *
244 alpha_mangle_fundamental_type (tree type)
245 {
246 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
247 && TARGET_LONG_DOUBLE_128)
248 return "g";
249
250 /* For all other types, use normal C++ mangling. */
251 return NULL;
252 }
253 #endif
254
255 /* Parse target option strings. */
256
257 void
258 override_options (void)
259 {
260 static const struct cpu_table {
261 const char *const name;
262 const enum processor_type processor;
263 const int flags;
264 } cpu_table[] = {
265 { "ev4", PROCESSOR_EV4, 0 },
266 { "ev45", PROCESSOR_EV4, 0 },
267 { "21064", PROCESSOR_EV4, 0 },
268 { "ev5", PROCESSOR_EV5, 0 },
269 { "21164", PROCESSOR_EV5, 0 },
270 { "ev56", PROCESSOR_EV5, MASK_BWX },
271 { "21164a", PROCESSOR_EV5, MASK_BWX },
272 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
275 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
277 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
279 { 0, 0, 0 }
280 };
281
282 int i;
283
284 /* Unicos/Mk doesn't have shared libraries. */
285 if (TARGET_ABI_UNICOSMK && flag_pic)
286 {
287 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
288 (flag_pic > 1) ? "PIC" : "pic");
289 flag_pic = 0;
290 }
291
292 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
293 floating-point instructions. Make that the default for this target. */
294 if (TARGET_ABI_UNICOSMK)
295 alpha_fprm = ALPHA_FPRM_DYN;
296 else
297 alpha_fprm = ALPHA_FPRM_NORM;
298
299 alpha_tp = ALPHA_TP_PROG;
300 alpha_fptm = ALPHA_FPTM_N;
301
302 /* We cannot use su and sui qualifiers for conversion instructions on
303 Unicos/Mk. I'm not sure if this is due to assembler or hardware
304 limitations. Right now, we issue a warning if -mieee is specified
305 and then ignore it; eventually, we should either get it right or
306 disable the option altogether. */
307
308 if (TARGET_IEEE)
309 {
310 if (TARGET_ABI_UNICOSMK)
311 warning (0, "-mieee not supported on Unicos/Mk");
312 else
313 {
314 alpha_tp = ALPHA_TP_INSN;
315 alpha_fptm = ALPHA_FPTM_SU;
316 }
317 }
318
319 if (TARGET_IEEE_WITH_INEXACT)
320 {
321 if (TARGET_ABI_UNICOSMK)
322 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
323 else
324 {
325 alpha_tp = ALPHA_TP_INSN;
326 alpha_fptm = ALPHA_FPTM_SUI;
327 }
328 }
329
330 if (alpha_tp_string)
331 {
332 if (! strcmp (alpha_tp_string, "p"))
333 alpha_tp = ALPHA_TP_PROG;
334 else if (! strcmp (alpha_tp_string, "f"))
335 alpha_tp = ALPHA_TP_FUNC;
336 else if (! strcmp (alpha_tp_string, "i"))
337 alpha_tp = ALPHA_TP_INSN;
338 else
339 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
340 }
341
342 if (alpha_fprm_string)
343 {
344 if (! strcmp (alpha_fprm_string, "n"))
345 alpha_fprm = ALPHA_FPRM_NORM;
346 else if (! strcmp (alpha_fprm_string, "m"))
347 alpha_fprm = ALPHA_FPRM_MINF;
348 else if (! strcmp (alpha_fprm_string, "c"))
349 alpha_fprm = ALPHA_FPRM_CHOP;
350 else if (! strcmp (alpha_fprm_string, "d"))
351 alpha_fprm = ALPHA_FPRM_DYN;
352 else
353 error ("bad value %qs for -mfp-rounding-mode switch",
354 alpha_fprm_string);
355 }
356
357 if (alpha_fptm_string)
358 {
359 if (strcmp (alpha_fptm_string, "n") == 0)
360 alpha_fptm = ALPHA_FPTM_N;
361 else if (strcmp (alpha_fptm_string, "u") == 0)
362 alpha_fptm = ALPHA_FPTM_U;
363 else if (strcmp (alpha_fptm_string, "su") == 0)
364 alpha_fptm = ALPHA_FPTM_SU;
365 else if (strcmp (alpha_fptm_string, "sui") == 0)
366 alpha_fptm = ALPHA_FPTM_SUI;
367 else
368 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
369 }
370
371 if (alpha_cpu_string)
372 {
373 for (i = 0; cpu_table [i].name; i++)
374 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
375 {
376 alpha_tune = alpha_cpu = cpu_table [i].processor;
377 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
378 target_flags |= cpu_table [i].flags;
379 break;
380 }
381 if (! cpu_table [i].name)
382 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
383 }
384
385 if (alpha_tune_string)
386 {
387 for (i = 0; cpu_table [i].name; i++)
388 if (! strcmp (alpha_tune_string, cpu_table [i].name))
389 {
390 alpha_tune = cpu_table [i].processor;
391 break;
392 }
393 if (! cpu_table [i].name)
394 error ("bad value %qs for -mcpu switch", alpha_tune_string);
395 }
396
397 /* Do some sanity checks on the above options. */
398
399 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
400 {
401 warning (0, "trap mode not supported on Unicos/Mk");
402 alpha_fptm = ALPHA_FPTM_N;
403 }
404
405 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
406 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
407 {
408 warning (0, "fp software completion requires -mtrap-precision=i");
409 alpha_tp = ALPHA_TP_INSN;
410 }
411
412 if (alpha_cpu == PROCESSOR_EV6)
413 {
414 /* Except for EV6 pass 1 (not released), we always have precise
415 arithmetic traps, which means we can do software completion
416 without minding trap shadows. */
417 alpha_tp = ALPHA_TP_PROG;
418 }
419
420 if (TARGET_FLOAT_VAX)
421 {
422 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
423 {
424 warning (0, "rounding mode not supported for VAX floats");
425 alpha_fprm = ALPHA_FPRM_NORM;
426 }
427 if (alpha_fptm == ALPHA_FPTM_SUI)
428 {
429 warning (0, "trap mode not supported for VAX floats");
430 alpha_fptm = ALPHA_FPTM_SU;
431 }
432 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
433 warning (0, "128-bit long double not supported for VAX floats");
434 target_flags &= ~MASK_LONG_DOUBLE_128;
435 }
436
437 {
438 char *end;
439 int lat;
440
441 if (!alpha_mlat_string)
442 alpha_mlat_string = "L1";
443
444 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
445 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
446 ;
447 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
448 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
449 && alpha_mlat_string[2] == '\0')
450 {
451 static int const cache_latency[][4] =
452 {
453 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
454 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
455 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
456 };
457
458 lat = alpha_mlat_string[1] - '0';
459 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
460 {
461 warning (0, "L%d cache latency unknown for %s",
462 lat, alpha_cpu_name[alpha_tune]);
463 lat = 3;
464 }
465 else
466 lat = cache_latency[alpha_tune][lat-1];
467 }
468 else if (! strcmp (alpha_mlat_string, "main"))
469 {
470 /* Most current memories have about 370ns latency. This is
471 a reasonable guess for a fast cpu. */
472 lat = 150;
473 }
474 else
475 {
476 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
477 lat = 3;
478 }
479
480 alpha_memory_latency = lat;
481 }
482
483 /* Default the definition of "small data" to 8 bytes. */
484 if (!g_switch_set)
485 g_switch_value = 8;
486
487 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
488 if (flag_pic == 1)
489 target_flags |= MASK_SMALL_DATA;
490 else if (flag_pic == 2)
491 target_flags &= ~MASK_SMALL_DATA;
492
493 /* Align labels and loops for optimal branching. */
494 /* ??? Kludge these by not doing anything if we don't optimize and also if
495 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
496 if (optimize > 0 && write_symbols != SDB_DEBUG)
497 {
498 if (align_loops <= 0)
499 align_loops = 16;
500 if (align_jumps <= 0)
501 align_jumps = 16;
502 }
503 if (align_functions <= 0)
504 align_functions = 16;
505
506 /* Acquire a unique set number for our register saves and restores. */
507 alpha_sr_alias_set = new_alias_set ();
508
509 /* Register variables and functions with the garbage collector. */
510
511 /* Set up function hooks. */
512 init_machine_status = alpha_init_machine_status;
513
514 /* Tell the compiler when we're using VAX floating point. */
515 if (TARGET_FLOAT_VAX)
516 {
517 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
518 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
519 REAL_MODE_FORMAT (TFmode) = NULL;
520 }
521
522 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
523 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
524 target_flags |= MASK_LONG_DOUBLE_128;
525 #endif
526 }
527 \f
528 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
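/* For example, 0x00000000ffff0000 qualifies (every byte is 0x00 or 0xff)
   while 0x0000000000000100 does not; masks of this form are the AND
   operands that a single zap/zapnot instruction can implement.  */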
529
530 int
531 zap_mask (HOST_WIDE_INT value)
532 {
533 int i;
534
535 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
536 i++, value >>= 8)
537 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
538 return 0;
539
540 return 1;
541 }
542
543 /* Return true if OP is valid for a particular TLS relocation.
544 We are already guaranteed that OP is a CONST. */
545
546 int
547 tls_symbolic_operand_1 (rtx op, int size, int unspec)
548 {
549 op = XEXP (op, 0);
550
551 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
552 return 0;
553 op = XVECEXP (op, 0, 0);
554
555 if (GET_CODE (op) != SYMBOL_REF)
556 return 0;
557
558 switch (SYMBOL_REF_TLS_MODEL (op))
559 {
560 case TLS_MODEL_LOCAL_DYNAMIC:
561 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
562 case TLS_MODEL_INITIAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == 64;
564 case TLS_MODEL_LOCAL_EXEC:
565 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
566 default:
567 gcc_unreachable ();
568 }
569 }
570
571 /* Used by aligned_memory_operand and unaligned_memory_operand to
572 resolve what reload is going to do with OP if it's a register. */
573
574 rtx
575 resolve_reload_operand (rtx op)
576 {
577 if (reload_in_progress)
578 {
579 rtx tmp = op;
580 if (GET_CODE (tmp) == SUBREG)
581 tmp = SUBREG_REG (tmp);
582 if (GET_CODE (tmp) == REG
583 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
584 {
585 op = reg_equiv_memory_loc[REGNO (tmp)];
586 if (op == 0)
587 return 0;
588 }
589 }
590 return op;
591 }
592
593 /* The set of scalar modes supported differs from the default
594 check-what-c-supports version in that sometimes TFmode is available
595 even when long double indicates only DFmode. On Unicos/Mk, HImode
596 doesn't map to any C type, but of course we still support it. */
597
598 static bool
599 alpha_scalar_mode_supported_p (enum machine_mode mode)
600 {
601 switch (mode)
602 {
603 case QImode:
604 case HImode:
605 case SImode:
606 case DImode:
607 case TImode: /* via optabs.c */
608 return true;
609
610 case SFmode:
611 case DFmode:
612 return true;
613
614 case TFmode:
615 return TARGET_HAS_XFLOATING_LIBS;
616
617 default:
618 return false;
619 }
620 }
621
622 /* Alpha implements a couple of integer vector mode operations when
623 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
624 so that the vectorizer can still operate on, e.g., move instructions,
625 and so that expand_vector_operations can do something useful. */
626
627 static bool
628 alpha_vector_mode_supported_p (enum machine_mode mode)
629 {
630 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
631 }
632
633 /* Return 1 if this function can directly return via $26. */
634
635 int
636 direct_return (void)
637 {
638 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
639 && reload_completed
640 && alpha_sa_size () == 0
641 && get_frame_size () == 0
642 && current_function_outgoing_args_size == 0
643 && current_function_pretend_args_size == 0);
644 }
645
646 /* Return the ADDR_VEC associated with a tablejump insn. */
647
648 rtx
649 alpha_tablejump_addr_vec (rtx insn)
650 {
651 rtx tmp;
652
653 tmp = JUMP_LABEL (insn);
654 if (!tmp)
655 return NULL_RTX;
656 tmp = NEXT_INSN (tmp);
657 if (!tmp)
658 return NULL_RTX;
659 if (GET_CODE (tmp) == JUMP_INSN
660 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
661 return PATTERN (tmp);
662 return NULL_RTX;
663 }
664
665 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
666
667 rtx
668 alpha_tablejump_best_label (rtx insn)
669 {
670 rtx jump_table = alpha_tablejump_addr_vec (insn);
671 rtx best_label = NULL_RTX;
672
673 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
674 there for edge frequency counts from profile data. */
675
676 if (jump_table)
677 {
678 int n_labels = XVECLEN (jump_table, 1);
679 int best_count = -1;
680 int i, j;
681
682 for (i = 0; i < n_labels; i++)
683 {
684 int count = 1;
685
686 for (j = i + 1; j < n_labels; j++)
687 if (XEXP (XVECEXP (jump_table, 1, i), 0)
688 == XEXP (XVECEXP (jump_table, 1, j), 0))
689 count++;
690
691 if (count > best_count)
692 best_count = count, best_label = XVECEXP (jump_table, 1, i);
693 }
694 }
695
696 return best_label ? best_label : const0_rtx;
697 }
698
699 /* Return the TLS model to use for SYMBOL. */
700
701 static enum tls_model
702 tls_symbolic_operand_type (rtx symbol)
703 {
704 enum tls_model model;
705
706 if (GET_CODE (symbol) != SYMBOL_REF)
707 return 0;
708 model = SYMBOL_REF_TLS_MODEL (symbol);
709
710 /* Local-exec with a 64-bit size is the same code as initial-exec. */
711 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
712 model = TLS_MODEL_INITIAL_EXEC;
713
714 return model;
715 }
716 \f
717 /* Return true if the function DECL will share the same GP as any
718 function in the current unit of translation. */
719
720 static bool
721 decl_has_samegp (tree decl)
722 {
723 /* Functions that are not local can be overridden, and thus may
724 not share the same gp. */
725 if (!(*targetm.binds_local_p) (decl))
726 return false;
727
728 /* If -msmall-data is in effect, assume that there is only one GP
729 for the module, and so any local symbol has this property. We
730 need explicit relocations to be able to enforce this for symbols
731 not defined in this unit of translation, however. */
732 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
733 return true;
734
735 /* Functions that are not external are defined in this UoT. */
736 /* ??? Irritatingly, static functions not yet emitted are still
737 marked "external". Apply this to non-static functions only. */
738 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
739 }
740
741 /* Return true if EXP should be placed in the small data section. */
742
743 static bool
744 alpha_in_small_data_p (tree exp)
745 {
746 /* We want to merge strings, so we never consider them small data. */
747 if (TREE_CODE (exp) == STRING_CST)
748 return false;
749
750 /* Functions are never in the small data area. Duh. */
751 if (TREE_CODE (exp) == FUNCTION_DECL)
752 return false;
753
754 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
755 {
756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
757 if (strcmp (section, ".sdata") == 0
758 || strcmp (section, ".sbss") == 0)
759 return true;
760 }
761 else
762 {
763 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
764
765 /* If this is an incomplete type with size 0, then we can't put it
766 in sdata because it might be too big when completed. */
767 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
768 return true;
769 }
770
771 return false;
772 }
773
774 #if TARGET_ABI_OPEN_VMS
775 static bool
776 alpha_linkage_symbol_p (const char *symname)
777 {
778 int symlen = strlen (symname);
779
780 if (symlen > 4)
781 return strcmp (&symname [symlen - 4], "..lk") == 0;
782
783 return false;
784 }
785
786 #define LINKAGE_SYMBOL_REF_P(X) \
787 ((GET_CODE (X) == SYMBOL_REF \
788 && alpha_linkage_symbol_p (XSTR (X, 0))) \
789 || (GET_CODE (X) == CONST \
790 && GET_CODE (XEXP (X, 0)) == PLUS \
791 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
793 #endif
794
795 /* legitimate_address_p recognizes an RTL expression that is a valid
796 memory address for an instruction. The MODE argument is the
797 machine mode for the MEM expression that wants to use this address.
798
799 For Alpha, we have either a constant address or the sum of a
800 register and a constant address, or just a register. For DImode,
801 any of those forms can be surrounded with an AND that clears the
802 low-order three bits; this is an "unaligned" access. */
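/* Concretely, the main cases accepted below are a bare base register, a
   base register plus a CONSTANT_ADDRESS_P offset, a constant address on
   its own (or a VMS linkage symbol), and, under TARGET_EXPLICIT_RELOCS,
   small-data symbols and LO_SUM of a base register with a local, dtp32,
   or tp32 symbol.  */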
803
804 bool
805 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
806 {
807 /* If this is an ldq_u type address, discard the outer AND. */
808 if (mode == DImode
809 && GET_CODE (x) == AND
810 && GET_CODE (XEXP (x, 1)) == CONST_INT
811 && INTVAL (XEXP (x, 1)) == -8)
812 x = XEXP (x, 0);
813
814 /* Discard non-paradoxical subregs. */
815 if (GET_CODE (x) == SUBREG
816 && (GET_MODE_SIZE (GET_MODE (x))
817 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
818 x = SUBREG_REG (x);
819
820 /* Unadorned general registers are valid. */
821 if (REG_P (x)
822 && (strict
823 ? STRICT_REG_OK_FOR_BASE_P (x)
824 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
825 return true;
826
827 /* Constant addresses (i.e. +/- 32k) are valid. */
828 if (CONSTANT_ADDRESS_P (x))
829 return true;
830
831 #if TARGET_ABI_OPEN_VMS
832 if (LINKAGE_SYMBOL_REF_P (x))
833 return true;
834 #endif
835
836 /* Register plus a small constant offset is valid. */
837 if (GET_CODE (x) == PLUS)
838 {
839 rtx ofs = XEXP (x, 1);
840 x = XEXP (x, 0);
841
842 /* Discard non-paradoxical subregs. */
843 if (GET_CODE (x) == SUBREG
844 && (GET_MODE_SIZE (GET_MODE (x))
845 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
846 x = SUBREG_REG (x);
847
848 if (REG_P (x))
849 {
850 if (! strict
851 && NONSTRICT_REG_OK_FP_BASE_P (x)
852 && GET_CODE (ofs) == CONST_INT)
853 return true;
854 if ((strict
855 ? STRICT_REG_OK_FOR_BASE_P (x)
856 : NONSTRICT_REG_OK_FOR_BASE_P (x))
857 && CONSTANT_ADDRESS_P (ofs))
858 return true;
859 }
860 }
861
862 /* If we're managing explicit relocations, LO_SUM is valid, as
863 are small data symbols. */
864 else if (TARGET_EXPLICIT_RELOCS)
865 {
866 if (small_symbolic_operand (x, Pmode))
867 return true;
868
869 if (GET_CODE (x) == LO_SUM)
870 {
871 rtx ofs = XEXP (x, 1);
872 x = XEXP (x, 0);
873
874 /* Discard non-paradoxical subregs. */
875 if (GET_CODE (x) == SUBREG
876 && (GET_MODE_SIZE (GET_MODE (x))
877 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
878 x = SUBREG_REG (x);
879
880 /* Must have a valid base register. */
881 if (! (REG_P (x)
882 && (strict
883 ? STRICT_REG_OK_FOR_BASE_P (x)
884 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
885 return false;
886
887 /* The symbol must be local. */
888 if (local_symbolic_operand (ofs, Pmode)
889 || dtp32_symbolic_operand (ofs, Pmode)
890 || tp32_symbolic_operand (ofs, Pmode))
891 return true;
892 }
893 }
894
895 return false;
896 }
897
898 /* Build the SYMBOL_REF for __tls_get_addr. */
899
900 static GTY(()) rtx tls_get_addr_libfunc;
901
902 static rtx
903 get_tls_get_addr (void)
904 {
905 if (!tls_get_addr_libfunc)
906 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
907 return tls_get_addr_libfunc;
908 }
909
910 /* Try machine-dependent ways of modifying an illegitimate address
911 to be legitimate. If we find one, return the new, valid address. */
912
913 rtx
914 alpha_legitimize_address (rtx x, rtx scratch,
915 enum machine_mode mode ATTRIBUTE_UNUSED)
916 {
917 HOST_WIDE_INT addend;
918
919 /* If the address is (plus reg const_int) and the CONST_INT is not a
920 valid offset, compute the high part of the constant and add it to
921 the register. Then our address is (plus temp low-part-const). */
922 if (GET_CODE (x) == PLUS
923 && GET_CODE (XEXP (x, 0)) == REG
924 && GET_CODE (XEXP (x, 1)) == CONST_INT
925 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
926 {
927 addend = INTVAL (XEXP (x, 1));
928 x = XEXP (x, 0);
929 goto split_addend;
930 }
931
932 /* If the address is (const (plus FOO const_int)), find the low-order
933 part of the CONST_INT. Then load FOO plus any high-order part of the
934 CONST_INT into a register. Our address is (plus reg low-part-const).
935 This is done to reduce the number of GOT entries. */
936 if (!no_new_pseudos
937 && GET_CODE (x) == CONST
938 && GET_CODE (XEXP (x, 0)) == PLUS
939 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
940 {
941 addend = INTVAL (XEXP (XEXP (x, 0), 1));
942 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
943 goto split_addend;
944 }
945
946 /* If we have a (plus reg const), emit the load as in (2), then add
947 the two registers, and finally generate (plus reg low-part-const) as
948 our address. */
949 if (!no_new_pseudos
950 && GET_CODE (x) == PLUS
951 && GET_CODE (XEXP (x, 0)) == REG
952 && GET_CODE (XEXP (x, 1)) == CONST
953 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
954 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
955 {
956 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
957 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
958 XEXP (XEXP (XEXP (x, 1), 0), 0),
959 NULL_RTX, 1, OPTAB_LIB_WIDEN);
960 goto split_addend;
961 }
962
963 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
964 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
965 {
966 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
967
968 switch (tls_symbolic_operand_type (x))
969 {
970 case TLS_MODEL_NONE:
971 break;
972
973 case TLS_MODEL_GLOBAL_DYNAMIC:
974 start_sequence ();
975
976 r0 = gen_rtx_REG (Pmode, 0);
977 r16 = gen_rtx_REG (Pmode, 16);
978 tga = get_tls_get_addr ();
979 dest = gen_reg_rtx (Pmode);
980 seq = GEN_INT (alpha_next_sequence_number++);
981
982 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
983 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
984 insn = emit_call_insn (insn);
985 CONST_OR_PURE_CALL_P (insn) = 1;
986 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
987
988 insn = get_insns ();
989 end_sequence ();
990
991 emit_libcall_block (insn, dest, r0, x);
992 return dest;
993
994 case TLS_MODEL_LOCAL_DYNAMIC:
995 start_sequence ();
996
997 r0 = gen_rtx_REG (Pmode, 0);
998 r16 = gen_rtx_REG (Pmode, 16);
999 tga = get_tls_get_addr ();
1000 scratch = gen_reg_rtx (Pmode);
1001 seq = GEN_INT (alpha_next_sequence_number++);
1002
1003 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1004 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1005 insn = emit_call_insn (insn);
1006 CONST_OR_PURE_CALL_P (insn) = 1;
1007 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1008
1009 insn = get_insns ();
1010 end_sequence ();
1011
1012 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1013 UNSPEC_TLSLDM_CALL);
1014 emit_libcall_block (insn, scratch, r0, eqv);
1015
1016 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1017 eqv = gen_rtx_CONST (Pmode, eqv);
1018
1019 if (alpha_tls_size == 64)
1020 {
1021 dest = gen_reg_rtx (Pmode);
1022 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1023 emit_insn (gen_adddi3 (dest, dest, scratch));
1024 return dest;
1025 }
1026 if (alpha_tls_size == 32)
1027 {
1028 insn = gen_rtx_HIGH (Pmode, eqv);
1029 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1030 scratch = gen_reg_rtx (Pmode);
1031 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1032 }
1033 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1034
1035 case TLS_MODEL_INITIAL_EXEC:
1036 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1037 eqv = gen_rtx_CONST (Pmode, eqv);
1038 tp = gen_reg_rtx (Pmode);
1039 scratch = gen_reg_rtx (Pmode);
1040 dest = gen_reg_rtx (Pmode);
1041
1042 emit_insn (gen_load_tp (tp));
1043 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1044 emit_insn (gen_adddi3 (dest, tp, scratch));
1045 return dest;
1046
1047 case TLS_MODEL_LOCAL_EXEC:
1048 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1049 eqv = gen_rtx_CONST (Pmode, eqv);
1050 tp = gen_reg_rtx (Pmode);
1051
1052 emit_insn (gen_load_tp (tp));
1053 if (alpha_tls_size == 32)
1054 {
1055 insn = gen_rtx_HIGH (Pmode, eqv);
1056 insn = gen_rtx_PLUS (Pmode, tp, insn);
1057 tp = gen_reg_rtx (Pmode);
1058 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1059 }
1060 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1061
1062 default:
1063 gcc_unreachable ();
1064 }
1065
1066 if (local_symbolic_operand (x, Pmode))
1067 {
1068 if (small_symbolic_operand (x, Pmode))
1069 return x;
1070 else
1071 {
1072 if (!no_new_pseudos)
1073 scratch = gen_reg_rtx (Pmode);
1074 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1075 gen_rtx_HIGH (Pmode, x)));
1076 return gen_rtx_LO_SUM (Pmode, scratch, x);
1077 }
1078 }
1079 }
1080
1081 return NULL;
1082
1083 split_addend:
1084 {
1085 HOST_WIDE_INT low, high;
1086
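    /* LOW is the sign-extended low 16 bits of ADDEND (an lda displacement);
       HIGH is the sign-extended remainder below 32 bits, a multiple of
       0x10000 that a single ldah can add.  E.g. addend 0x1234abcd splits
       into low = -0x5433 and high = 0x12350000 with nothing left over.  */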
1087 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1088 addend -= low;
1089 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1090 addend -= high;
1091
1092 if (addend)
1093 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1094 (no_new_pseudos ? scratch : NULL_RTX),
1095 1, OPTAB_LIB_WIDEN);
1096 if (high)
1097 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1098 (no_new_pseudos ? scratch : NULL_RTX),
1099 1, OPTAB_LIB_WIDEN);
1100
1101 return plus_constant (x, low);
1102 }
1103 }
1104
1105 /* Primarily this is required for TLS symbols, but given that our move
1106 patterns *ought* to be able to handle any symbol at any time, we
1107 should never be spilling symbolic operands to the constant pool, ever. */
1108
1109 static bool
1110 alpha_cannot_force_const_mem (rtx x)
1111 {
1112 enum rtx_code code = GET_CODE (x);
1113 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1114 }
1115
1116 /* We do not allow indirect calls to be optimized into sibling calls, nor
1117 can we allow a call to a function with a different GP to be optimized
1118 into a sibcall. */
1119
1120 static bool
1121 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1122 {
1123 /* Can't do indirect tail calls, since we don't know if the target
1124 uses the same GP. */
1125 if (!decl)
1126 return false;
1127
1128 /* Otherwise, we can make a tail call if the target function shares
1129 the same GP. */
1130 return decl_has_samegp (decl);
1131 }
1132
1133 int
1134 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1135 {
1136 rtx x = *px;
1137
1138 /* Don't re-split. */
1139 if (GET_CODE (x) == LO_SUM)
1140 return -1;
1141
1142 return small_symbolic_operand (x, Pmode) != 0;
1143 }
1144
1145 static int
1146 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1147 {
1148 rtx x = *px;
1149
1150 /* Don't re-split. */
1151 if (GET_CODE (x) == LO_SUM)
1152 return -1;
1153
1154 if (small_symbolic_operand (x, Pmode))
1155 {
1156 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1157 *px = x;
1158 return -1;
1159 }
1160
1161 return 0;
1162 }
1163
1164 rtx
1165 split_small_symbolic_operand (rtx x)
1166 {
1167 x = copy_insn (x);
1168 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1169 return x;
1170 }
1171
1172 /* Indicate that INSN cannot be duplicated. This is true for any insn
1173 that we've marked with gpdisp relocs, since those have to stay in
1174 1-1 correspondence with one another.
1175
1176 Technically we could copy them if we could set up a mapping from one
1177 sequence number to another, across the set of insns to be duplicated.
1178 This seems overly complicated and error-prone since interblock motion
1179 from sched-ebb could move one of the pair of insns to a different block.
1180
1181 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1182 then they'll be in a different block from their ldgp. Which could lead
1183 the bb reorder code to think that it would be ok to copy just the block
1184 containing the call and branch to the block containing the ldgp. */
1185
1186 static bool
1187 alpha_cannot_copy_insn_p (rtx insn)
1188 {
1189 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1190 return false;
1191 if (recog_memoized (insn) >= 0)
1192 return get_attr_cannot_copy (insn);
1193 else
1194 return false;
1195 }
1196
1197
1198 /* Try a machine-dependent way of reloading an illegitimate address
1199 operand. If we find one, push the reload and return the new rtx. */
1200
1201 rtx
1202 alpha_legitimize_reload_address (rtx x,
1203 enum machine_mode mode ATTRIBUTE_UNUSED,
1204 int opnum, int type,
1205 int ind_levels ATTRIBUTE_UNUSED)
1206 {
1207 /* We must recognize output that we have already generated ourselves. */
1208 if (GET_CODE (x) == PLUS
1209 && GET_CODE (XEXP (x, 0)) == PLUS
1210 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1211 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1212 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1213 {
1214 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1215 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1216 opnum, type);
1217 return x;
1218 }
1219
1220 /* We wish to handle large displacements off a base register by
1221 splitting the addend across an ldah and the mem insn. This
1222 cuts the number of extra insns needed from 3 to 1. */
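   /* For example, (plus (reg) (const_int 0x123456)) becomes
      (plus (plus (reg) (const_int 0x120000)) (const_int 0x3456)); the inner
      sum is reloaded into a base register (a single ldah) and 0x3456 stays
      as the memory displacement.  */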
1223 if (GET_CODE (x) == PLUS
1224 && GET_CODE (XEXP (x, 0)) == REG
1225 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1226 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1227 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1228 {
1229 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1230 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1231 HOST_WIDE_INT high
1232 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1233
1234 /* Check for 32-bit overflow. */
1235 if (high + low != val)
1236 return NULL_RTX;
1237
1238 /* Reload the high part into a base reg; leave the low part
1239 in the mem directly. */
1240 x = gen_rtx_PLUS (GET_MODE (x),
1241 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1242 GEN_INT (high)),
1243 GEN_INT (low));
1244
1245 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1246 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1247 opnum, type);
1248 return x;
1249 }
1250
1251 return NULL_RTX;
1252 }
1253 \f
1254 /* Compute a (partial) cost for rtx X. Return true if the complete
1255 cost has been computed, and false if subexpressions should be
1256 scanned. In either case, *TOTAL contains the cost result. */
1257
1258 static bool
1259 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1260 {
1261 enum machine_mode mode = GET_MODE (x);
1262 bool float_mode_p = FLOAT_MODE_P (mode);
1263 const struct alpha_rtx_cost_data *cost_data;
1264
1265 if (optimize_size)
1266 cost_data = &alpha_rtx_cost_size;
1267 else
1268 cost_data = &alpha_rtx_cost_data[alpha_tune];
1269
1270 switch (code)
1271 {
1272 case CONST_INT:
1273 /* If this is an 8-bit constant, return zero since it can be used
1274 nearly anywhere with no cost. If it is a valid operand for an
1275 ADD or AND, likewise return 0 if we know it will be used in that
1276 context. Otherwise, return 2 since it might be used there later.
1277 All other constants take at least two insns. */
1278 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1279 {
1280 *total = 0;
1281 return true;
1282 }
1283 /* FALLTHRU */
1284
1285 case CONST_DOUBLE:
1286 if (x == CONST0_RTX (mode))
1287 *total = 0;
1288 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1289 || (outer_code == AND && and_operand (x, VOIDmode)))
1290 *total = 0;
1291 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1292 *total = 2;
1293 else
1294 *total = COSTS_N_INSNS (2);
1295 return true;
1296
1297 case CONST:
1298 case SYMBOL_REF:
1299 case LABEL_REF:
1300 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1301 *total = COSTS_N_INSNS (outer_code != MEM);
1302 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1303 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1304 else if (tls_symbolic_operand_type (x))
1305 /* Estimate of cost for call_pal rduniq. */
1306 /* ??? How many insns do we emit here? More than one... */
1307 *total = COSTS_N_INSNS (15);
1308 else
1309 /* Otherwise we do a load from the GOT. */
1310 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1311 return true;
1312
1313 case HIGH:
1314 /* This is effectively an add_operand. */
1315 *total = 2;
1316 return true;
1317
1318 case PLUS:
1319 case MINUS:
1320 if (float_mode_p)
1321 *total = cost_data->fp_add;
1322 else if (GET_CODE (XEXP (x, 0)) == MULT
1323 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1324 {
1325 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1326 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1327 return true;
1328 }
1329 return false;
1330
1331 case MULT:
1332 if (float_mode_p)
1333 *total = cost_data->fp_mult;
1334 else if (mode == DImode)
1335 *total = cost_data->int_mult_di;
1336 else
1337 *total = cost_data->int_mult_si;
1338 return false;
1339
1340 case ASHIFT:
1341 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1342 && INTVAL (XEXP (x, 1)) <= 3)
1343 {
1344 *total = COSTS_N_INSNS (1);
1345 return false;
1346 }
1347 /* FALLTHRU */
1348
1349 case ASHIFTRT:
1350 case LSHIFTRT:
1351 *total = cost_data->int_shift;
1352 return false;
1353
1354 case IF_THEN_ELSE:
1355 if (float_mode_p)
1356 *total = cost_data->fp_add;
1357 else
1358 *total = cost_data->int_cmov;
1359 return false;
1360
1361 case DIV:
1362 case UDIV:
1363 case MOD:
1364 case UMOD:
1365 if (!float_mode_p)
1366 *total = cost_data->int_div;
1367 else if (mode == SFmode)
1368 *total = cost_data->fp_div_sf;
1369 else
1370 *total = cost_data->fp_div_df;
1371 return false;
1372
1373 case MEM:
1374 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1375 return true;
1376
1377 case NEG:
1378 if (! float_mode_p)
1379 {
1380 *total = COSTS_N_INSNS (1);
1381 return false;
1382 }
1383 /* FALLTHRU */
1384
1385 case ABS:
1386 if (! float_mode_p)
1387 {
1388 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1389 return false;
1390 }
1391 /* FALLTHRU */
1392
1393 case FLOAT:
1394 case UNSIGNED_FLOAT:
1395 case FIX:
1396 case UNSIGNED_FIX:
1397 case FLOAT_TRUNCATE:
1398 *total = cost_data->fp_add;
1399 return false;
1400
1401 case FLOAT_EXTEND:
1402 if (GET_CODE (XEXP (x, 0)) == MEM)
1403 *total = 0;
1404 else
1405 *total = cost_data->fp_add;
1406 return false;
1407
1408 default:
1409 return false;
1410 }
1411 }
1412 \f
1413 /* REF is an alignable memory location. Place an aligned SImode
1414 reference into *PALIGNED_MEM and the number of bits to shift into
1415 *PBITNUM. */
1417
1418 void
1419 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1420 {
1421 rtx base;
1422 HOST_WIDE_INT disp, offset;
1423
1424 gcc_assert (GET_CODE (ref) == MEM);
1425
1426 if (reload_in_progress
1427 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1428 {
1429 base = find_replacement (&XEXP (ref, 0));
1430 gcc_assert (memory_address_p (GET_MODE (ref), base));
1431 }
1432 else
1433 base = XEXP (ref, 0);
1434
1435 if (GET_CODE (base) == PLUS)
1436 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1437 else
1438 disp = 0;
1439
1440 /* Find the byte offset within an aligned word. If the memory itself is
1441 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1442 will have examined the base register and determined it is aligned, and
1443 thus displacements from it are naturally alignable. */
1444 if (MEM_ALIGN (ref) >= 32)
1445 offset = 0;
1446 else
1447 offset = disp & 3;
1448
1449 /* Access the entire aligned word. */
1450 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1451
1452 /* Convert the byte offset within the word to a bit offset. */
1453 if (WORDS_BIG_ENDIAN)
1454 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1455 else
1456 offset *= 8;
1457 *pbitnum = GEN_INT (offset);
1458 }
1459
1460 /* Similar, but just get the address. Handle the two reload cases. */
1462
1463 rtx
1464 get_unaligned_address (rtx ref)
1465 {
1466 rtx base;
1467 HOST_WIDE_INT offset = 0;
1468
1469 gcc_assert (GET_CODE (ref) == MEM);
1470
1471 if (reload_in_progress
1472 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1473 {
1474 base = find_replacement (&XEXP (ref, 0));
1475
1476 gcc_assert (memory_address_p (GET_MODE (ref), base));
1477 }
1478 else
1479 base = XEXP (ref, 0);
1480
1481 if (GET_CODE (base) == PLUS)
1482 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1483
1484 return plus_constant (base, offset);
1485 }
1486
1487 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1488 X is always returned in a register. */
1489
1490 rtx
1491 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1492 {
1493 if (GET_CODE (addr) == PLUS)
1494 {
1495 ofs += INTVAL (XEXP (addr, 1));
1496 addr = XEXP (addr, 0);
1497 }
1498
1499 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1500 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1501 }
1502
1503 /* On the Alpha, all (non-symbolic) constants except zero go into
1504 a floating-point register via memory. Note that we cannot
1505 return anything that is not a subset of CLASS, and that some
1506 symbolic constants cannot be dropped to memory. */
1507
1508 enum reg_class
1509 alpha_preferred_reload_class (rtx x, enum reg_class class)
1510 {
1511 /* Zero is present in any register class. */
1512 if (x == CONST0_RTX (GET_MODE (x)))
1513 return class;
1514
1515 /* These sorts of constants we can easily drop to memory. */
1516 if (GET_CODE (x) == CONST_INT
1517 || GET_CODE (x) == CONST_DOUBLE
1518 || GET_CODE (x) == CONST_VECTOR)
1519 {
1520 if (class == FLOAT_REGS)
1521 return NO_REGS;
1522 if (class == ALL_REGS)
1523 return GENERAL_REGS;
1524 return class;
1525 }
1526
1527 /* All other kinds of constants should not (and in the case of HIGH
1528 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1529 secondary reload. */
1530 if (CONSTANT_P (x))
1531 return (class == ALL_REGS ? GENERAL_REGS : class);
1532
1533 return class;
1534 }
1535
1536 /* Loading and storing HImode or QImode values to and from memory
1537 usually requires a scratch register. The exceptions are loading
1538 QImode and HImode from an aligned address to a general register
1539 unless byte instructions are permitted.
1540
1541 We also cannot load an unaligned address or a paradoxical SUBREG
1542 into an FP register.
1543
1544 We also cannot do integral arithmetic into FP regs, as might result
1545 from register elimination into a DImode fp register. */
1546
1547 enum reg_class
1548 alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
1549 rtx x, int in)
1550 {
1551 if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
1552 {
1553 if (GET_CODE (x) == MEM
1554 || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
1555 || (GET_CODE (x) == SUBREG
1556 && (GET_CODE (SUBREG_REG (x)) == MEM
1557 || (GET_CODE (SUBREG_REG (x)) == REG
1558 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
1559 {
1560 if (!in || !aligned_memory_operand (x, mode))
1561 return GENERAL_REGS;
1562 }
1563 }
1564
1565 if (class == FLOAT_REGS)
1566 {
1567 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
1568 return GENERAL_REGS;
1569
1570 if (GET_CODE (x) == SUBREG
1571 && (GET_MODE_SIZE (GET_MODE (x))
1572 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
1573 return GENERAL_REGS;
1574
1575 if (in && INTEGRAL_MODE_P (mode)
1576 && ! (memory_operand (x, mode) || x == const0_rtx))
1577 return GENERAL_REGS;
1578 }
1579
1580 return NO_REGS;
1581 }
1582 \f
1583 /* Subfunction of the following function. Update the flags of any MEM
1584 found in part of X. */
1585
1586 static int
1587 alpha_set_memflags_1 (rtx *xp, void *data)
1588 {
1589 rtx x = *xp, orig = (rtx) data;
1590
1591 if (GET_CODE (x) != MEM)
1592 return 0;
1593
1594 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1595 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1596 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1597 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1598 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1599
1600 /* Sadly, we cannot use alias sets because the extra aliasing
1601 produced by the AND interferes. Given that two-byte quantities
1602 are the only thing we would be able to differentiate anyway,
1603 there does not seem to be any point in convoluting the early
1604 out of the alias check. */
1605
1606 return -1;
1607 }
1608
1609 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1610 generated to perform a memory operation, look for any MEMs in either
1611 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1612 notrap, and read-only flags from REF into each of the MEMs found.
1613 If REF is not a MEM, don't do anything. */
1614
1615 void
1616 alpha_set_memflags (rtx insn, rtx ref)
1617 {
1618 rtx *base_ptr;
1619
1620 if (GET_CODE (ref) != MEM)
1621 return;
1622
1623 /* This is only called from alpha.md, after having had something
1624 generated from one of the insn patterns. So if everything is
1625 zero, the pattern is already up-to-date. */
1626 if (!MEM_VOLATILE_P (ref)
1627 && !MEM_IN_STRUCT_P (ref)
1628 && !MEM_SCALAR_P (ref)
1629 && !MEM_NOTRAP_P (ref)
1630 && !MEM_READONLY_P (ref))
1631 return;
1632
1633 if (INSN_P (insn))
1634 base_ptr = &PATTERN (insn);
1635 else
1636 base_ptr = &insn;
1637 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1638 }
1639 \f
1640 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1641 int, bool);
1642
1643 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1644 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1645 and return pc_rtx if successful. */
1646
1647 static rtx
1648 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1649 HOST_WIDE_INT c, int n, bool no_output)
1650 {
1651 HOST_WIDE_INT new;
1652 int i, bits;
1653 /* Use a pseudo if highly optimizing and still generating RTL. */
1654 rtx subtarget
1655 = (flag_expensive_optimizations && !no_new_pseudos ? 0 : target);
1656 rtx temp, insn;
1657
1658 /* If this is a sign-extended 32-bit constant, we can do this in at most
1659 three insns, so do it if we have enough insns left. We always have
1660 a sign-extended 32-bit constant when compiling on a narrow machine. */
1661
1662 if (HOST_BITS_PER_WIDE_INT != 64
1663 || c >> 31 == -1 || c >> 31 == 0)
1664 {
1665 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1666 HOST_WIDE_INT tmp1 = c - low;
1667 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1668 HOST_WIDE_INT extra = 0;
1669
1670 /* If HIGH will be interpreted as negative but the constant is
1671 positive, we must adjust it to do two ldah insns. */
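      /* For example, c = 0x7fff8000 gives low = -0x8000 and an initial
         high of -0x8000; the adjustment sets extra = 0x4000 and
         high = 0x4000, so the constant is built as
         0x40000000 + 0x40000000 - 0x8000.  */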
1672
1673 if ((high & 0x8000) != 0 && c >= 0)
1674 {
1675 extra = 0x4000;
1676 tmp1 -= 0x40000000;
1677 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1678 }
1679
1680 if (c == low || (low == 0 && extra == 0))
1681 {
1682 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1683 but that meant that we can't handle INT_MIN on 32-bit machines
1684 (like NT/Alpha), because we recurse indefinitely through
1685 emit_move_insn to gen_movdi. So instead, since we know exactly
1686 what we want, create it explicitly. */
1687
1688 if (no_output)
1689 return pc_rtx;
1690 if (target == NULL)
1691 target = gen_reg_rtx (mode);
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1693 return target;
1694 }
1695 else if (n >= 2 + (extra != 0))
1696 {
1697 if (no_output)
1698 return pc_rtx;
1699 if (no_new_pseudos)
1700 {
1701 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1702 temp = target;
1703 }
1704 else
1705 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1706 subtarget, mode);
1707
1708 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1709 This means that if we go through expand_binop, we'll try to
1710 generate extensions, etc, which will require new pseudos, which
1711 will fail during some split phases. The SImode add patterns
1712 still exist, but are not named. So build the insns by hand. */
1713
1714 if (extra != 0)
1715 {
1716 if (! subtarget)
1717 subtarget = gen_reg_rtx (mode);
1718 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1719 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1720 emit_insn (insn);
1721 temp = subtarget;
1722 }
1723
1724 if (target == NULL)
1725 target = gen_reg_rtx (mode);
1726 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1727 insn = gen_rtx_SET (VOIDmode, target, insn);
1728 emit_insn (insn);
1729 return target;
1730 }
1731 }
1732
1733 /* If we couldn't do it that way, try some other methods. But if we have
1734 no instructions left, don't bother. Likewise, if this is SImode and
1735 we can't make pseudos, we can't do anything since the expand_binop
1736 and expand_unop calls will widen and try to make pseudos. */
1737
1738 if (n == 1 || (mode == SImode && no_new_pseudos))
1739 return 0;
1740
1741 /* Next, see if we can load a related constant and then shift and possibly
1742 negate it to get the constant we want. Try this once each increasing
1743 numbers of insns. */
1744
1745 for (i = 1; i < n; i++)
1746 {
1747 /* First, see if, minus some low bits, we have an easy load of
1748 the high bits. */
1749
1750 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1751 if (new != 0)
1752 {
1753 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1754 if (temp)
1755 {
1756 if (no_output)
1757 return temp;
1758 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1759 target, 0, OPTAB_WIDEN);
1760 }
1761 }
1762
1763 /* Next try complementing. */
1764 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1765 if (temp)
1766 {
1767 if (no_output)
1768 return temp;
1769 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1770 }
1771
1772 /* Next try to form a constant and do a left shift. We can do this
1773 if some low-order bits are zero; the exact_log2 call below tells
1774 us that information. The bits we are shifting out could be any
1775 value, but here we'll just try the 0- and sign-extended forms of
1776 the constant. To try to increase the chance of having the same
1777 constant in more than one insn, start at the highest number of
1778 bits to shift, but try all possibilities in case a ZAPNOT will
1779 be useful. */
1780
1781 bits = exact_log2 (c & -c);
1782 if (bits > 0)
1783 for (; bits > 0; bits--)
1784 {
1785 new = c >> bits;
1786 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1787 if (!temp && c < 0)
1788 {
1789 new = (unsigned HOST_WIDE_INT)c >> bits;
1790 temp = alpha_emit_set_const (subtarget, mode, new,
1791 i, no_output);
1792 }
1793 if (temp)
1794 {
1795 if (no_output)
1796 return temp;
1797 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1798 target, 0, OPTAB_WIDEN);
1799 }
1800 }
1801
1802 /* Now try high-order zero bits. Here we try the shifted-in bits as
1803 all zero and all ones. Be careful to avoid shifting outside the
1804 mode and to avoid shifting outside the host wide int size. */
1805 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1806 confuse the recursive call and set all of the high 32 bits. */
1807
1808 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1809 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1810 if (bits > 0)
1811 for (; bits > 0; bits--)
1812 {
1813 new = c << bits;
1814 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1815 if (!temp)
1816 {
1817 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1818 temp = alpha_emit_set_const (subtarget, mode, new,
1819 i, no_output);
1820 }
1821 if (temp)
1822 {
1823 if (no_output)
1824 return temp;
1825 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1826 target, 1, OPTAB_WIDEN);
1827 }
1828 }
1829
1830 /* Now try high-order 1 bits. We get that with a sign-extension.
1831 But one bit isn't enough here. Be careful to avoid shifting outside
1832 the mode and to avoid shifting outside the host wide int size. */
1833
1834 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1835 - floor_log2 (~ c) - 2);
1836 if (bits > 0)
1837 for (; bits > 0; bits--)
1838 {
1839 new = c << bits;
1840 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1841 if (!temp)
1842 {
1843 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1844 temp = alpha_emit_set_const (subtarget, mode, new,
1845 i, no_output);
1846 }
1847 if (temp)
1848 {
1849 if (no_output)
1850 return temp;
1851 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1852 target, 0, OPTAB_WIDEN);
1853 }
1854 }
1855 }
1856
1857 #if HOST_BITS_PER_WIDE_INT == 64
1858 /* Finally, see if we can load a value into the target that is the same as the
1859 constant except that all bytes that are 0 are changed to be 0xff. If we
1860 can, then we can do a ZAPNOT to obtain the desired constant. */
1861
1862 new = c;
1863 for (i = 0; i < 64; i += 8)
1864 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1865 new |= (HOST_WIDE_INT) 0xff << i;
1866
1867 /* We are only called for SImode and DImode. If this is SImode, ensure that
1868 we are sign extended to a full word. */
1869
1870 if (mode == SImode)
1871 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1872
1873 if (new != c)
1874 {
1875 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1876 if (temp)
1877 {
1878 if (no_output)
1879 return temp;
1880 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1881 target, 0, OPTAB_WIDEN);
1882 }
1883 }
1884 #endif
1885
1886 return 0;
1887 }
1888
1889 /* Try to output insns to set TARGET equal to the constant C if it can be
1890 done in less than N insns. Do all computations in MODE. Returns the place
1891 where the output has been placed if it can be done and the insns have been
1892 emitted. If it would take more than N insns, zero is returned and no
1893 insns are emitted. */
1894
1895 static rtx
1896 alpha_emit_set_const (rtx target, enum machine_mode mode,
1897 HOST_WIDE_INT c, int n, bool no_output)
1898 {
1899 enum machine_mode orig_mode = mode;
1900 rtx orig_target = target;
1901 rtx result = 0;
1902 int i;
1903
1904 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1905 can't load this constant in one insn, do this in DImode. */
1906 if (no_new_pseudos && mode == SImode
1907 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1908 {
1909 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1910 if (result)
1911 return result;
1912
1913 target = no_output ? NULL : gen_lowpart (DImode, target);
1914 mode = DImode;
1915 }
1916 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1917 {
1918 target = no_output ? NULL : gen_lowpart (DImode, target);
1919 mode = DImode;
1920 }
1921
1922 /* Try 1 insn, then 2, then up to N. */
1923 for (i = 1; i <= n; i++)
1924 {
1925 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1926 if (result)
1927 {
1928 rtx insn, set;
1929
1930 if (no_output)
1931 return result;
1932
1933 insn = get_last_insn ();
1934 set = single_set (insn);
1935 if (! CONSTANT_P (SET_SRC (set)))
1936 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1937 break;
1938 }
1939 }
1940
1941 /* Allow for the case where we changed the mode of TARGET. */
1942 if (result)
1943 {
1944 if (result == target)
1945 result = orig_target;
1946 else if (mode != orig_mode)
1947 result = gen_lowpart (orig_mode, result);
1948 }
1949
1950 return result;
1951 }
1952
1953 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1954 fall back to a straightforward decomposition. We do this to avoid
1955 exponential run times encountered when looking for longer sequences
1956 with alpha_emit_set_const. */
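/* For example, c = 0x123456789abcdef0 decomposes below into d1 = -0x2110,
   d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000, and the emitted sequence
   computes (((d4 + d3) << 32) + d2) + d1.  */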
1957
1958 static rtx
1959 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1960 {
1961 HOST_WIDE_INT d1, d2, d3, d4;
1962
1963 /* Decompose the entire word. */
1964 #if HOST_BITS_PER_WIDE_INT >= 64
1965 gcc_assert (c2 == -(c1 < 0));
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 c1 = (c1 - d2) >> 32;
1970 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1971 c1 -= d3;
1972 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1973 gcc_assert (c1 == d4);
1974 #else
1975 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1976 c1 -= d1;
1977 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1978 gcc_assert (c1 == d2);
1979 c2 += (d2 < 0);
1980 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1981 c2 -= d3;
1982 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1983 gcc_assert (c2 == d4);
1984 #endif
1985
1986 /* Construct the high word. */
1987 if (d4)
1988 {
1989 emit_move_insn (target, GEN_INT (d4));
1990 if (d3)
1991 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1992 }
1993 else
1994 emit_move_insn (target, GEN_INT (d3));
1995
1996 /* Shift it into place. */
1997 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1998
1999 /* Add in the low bits. */
2000 if (d2)
2001 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2002 if (d1)
2003 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2004
2005 return target;
2006 }
2007
2008 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2009 the low 64 bits. */
2010
2011 static void
2012 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2013 {
2014 HOST_WIDE_INT i0, i1;
2015
2016 if (GET_CODE (x) == CONST_VECTOR)
2017 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2018
2019
2020 if (GET_CODE (x) == CONST_INT)
2021 {
2022 i0 = INTVAL (x);
2023 i1 = -(i0 < 0);
2024 }
2025 else if (HOST_BITS_PER_WIDE_INT >= 64)
2026 {
2027 i0 = CONST_DOUBLE_LOW (x);
2028 i1 = -(i0 < 0);
2029 }
2030 else
2031 {
2032 i0 = CONST_DOUBLE_LOW (x);
2033 i1 = CONST_DOUBLE_HIGH (x);
2034 }
2035
2036 *p0 = i0;
2037 *p1 = i1;
2038 }
2039
2040 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2041 are willing to load the value into a register via a move pattern.
2042 Normally this is all symbolic constants, integral constants that
2043 take three or fewer instructions, and floating-point zero. */
2044
2045 bool
2046 alpha_legitimate_constant_p (rtx x)
2047 {
2048 enum machine_mode mode = GET_MODE (x);
2049 HOST_WIDE_INT i0, i1;
2050
2051 switch (GET_CODE (x))
2052 {
2053 case CONST:
2054 case LABEL_REF:
2055 case HIGH:
2056 return true;
2057
2058 case SYMBOL_REF:
2059 /* TLS symbols are never valid. */
2060 return SYMBOL_REF_TLS_MODEL (x) == 0;
2061
2062 case CONST_DOUBLE:
2063 if (x == CONST0_RTX (mode))
2064 return true;
2065 if (FLOAT_MODE_P (mode))
2066 return false;
2067 goto do_integer;
2068
2069 case CONST_VECTOR:
2070 if (x == CONST0_RTX (mode))
2071 return true;
2072 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2073 return false;
2074 if (GET_MODE_SIZE (mode) != 8)
2075 return false;
2076 goto do_integer;
2077
2078 case CONST_INT:
2079 do_integer:
2080 if (TARGET_BUILD_CONSTANTS)
2081 return true;
2082 alpha_extract_integer (x, &i0, &i1);
2083 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2084 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2085 return false;
2086
2087 default:
2088 return false;
2089 }
2090 }
2091
2092 /* Operand 1 is known to be a constant, and should require more than one
2093 instruction to load. Emit that multi-part load. */
2094
2095 bool
2096 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2097 {
2098 HOST_WIDE_INT i0, i1;
2099 rtx temp = NULL_RTX;
2100
2101 alpha_extract_integer (operands[1], &i0, &i1);
2102
2103 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2104 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2105
2106 if (!temp && TARGET_BUILD_CONSTANTS)
2107 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2108
2109 if (temp)
2110 {
2111 if (!rtx_equal_p (operands[0], temp))
2112 emit_move_insn (operands[0], temp);
2113 return true;
2114 }
2115
2116 return false;
2117 }
2118
2119 /* Expand a move instruction; return true if all work is done.
2120 We don't handle non-bwx subword loads here. */
2121
2122 bool
2123 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2124 {
2125 /* If the output is not a register, the input must be. */
2126 if (GET_CODE (operands[0]) == MEM
2127 && ! reg_or_0_operand (operands[1], mode))
2128 operands[1] = force_reg (mode, operands[1]);
2129
2130 /* Allow legitimize_address to perform some simplifications. */
2131 if (mode == Pmode && symbolic_operand (operands[1], mode))
2132 {
2133 rtx tmp;
2134
2135 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2136 if (tmp)
2137 {
2138 if (tmp == operands[0])
2139 return true;
2140 operands[1] = tmp;
2141 return false;
2142 }
2143 }
2144
2145 /* Early out for non-constants and valid constants. */
2146 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2147 return false;
2148
2149 /* Split large integers. */
2150 if (GET_CODE (operands[1]) == CONST_INT
2151 || GET_CODE (operands[1]) == CONST_DOUBLE
2152 || GET_CODE (operands[1]) == CONST_VECTOR)
2153 {
2154 if (alpha_split_const_mov (mode, operands))
2155 return true;
2156 }
2157
2158 /* Otherwise we've nothing left but to drop the thing to memory. */
2159 operands[1] = force_const_mem (mode, operands[1]);
2160 if (reload_in_progress)
2161 {
2162 emit_move_insn (operands[0], XEXP (operands[1], 0));
2163 operands[1] = copy_rtx (operands[1]);
2164 XEXP (operands[1], 0) = operands[0];
2165 }
2166 else
2167 operands[1] = validize_mem (operands[1]);
2168 return false;
2169 }
2170
2171 /* Expand a non-bwx QImode or HImode move instruction;
2172 return true if all work is done. */
2173
2174 bool
2175 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2176 {
2177 /* If the output is not a register, the input must be. */
2178 if (GET_CODE (operands[0]) == MEM)
2179 operands[1] = force_reg (mode, operands[1]);
2180
2181 /* Handle four memory cases, unaligned and aligned for either the input
2182 or the output. The only case where we can be called during reload is
2183 for aligned loads; all other cases require temporaries. */
2184
2185 if (GET_CODE (operands[1]) == MEM
2186 || (GET_CODE (operands[1]) == SUBREG
2187 && GET_CODE (SUBREG_REG (operands[1])) == MEM)
2188 || (reload_in_progress && GET_CODE (operands[1]) == REG
2189 && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
2190 || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
2191 && GET_CODE (SUBREG_REG (operands[1])) == REG
2192 && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
2193 {
2194 if (aligned_memory_operand (operands[1], mode))
2195 {
2196 if (reload_in_progress)
2197 {
2198 emit_insn ((mode == QImode
2199 ? gen_reload_inqi_help
2200 : gen_reload_inhi_help)
2201 (operands[0], operands[1],
2202 gen_rtx_REG (SImode, REGNO (operands[0]))));
2203 }
2204 else
2205 {
2206 rtx aligned_mem, bitnum;
2207 rtx scratch = gen_reg_rtx (SImode);
2208 rtx subtarget;
2209 bool copyout;
2210
2211 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2212
2213 subtarget = operands[0];
2214 if (GET_CODE (subtarget) == REG)
2215 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2216 else
2217 subtarget = gen_reg_rtx (DImode), copyout = true;
2218
2219 emit_insn ((mode == QImode
2220 ? gen_aligned_loadqi
2221 : gen_aligned_loadhi)
2222 (subtarget, aligned_mem, bitnum, scratch));
2223
2224 if (copyout)
2225 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2226 }
2227 }
2228 else
2229 {
2230 /* Don't pass these as parameters since that makes the generated
2231 code depend on parameter evaluation order which will cause
2232 bootstrap failures. */
2233
2234 rtx temp1, temp2, seq, subtarget;
2235 bool copyout;
2236
2237 temp1 = gen_reg_rtx (DImode);
2238 temp2 = gen_reg_rtx (DImode);
2239
2240 subtarget = operands[0];
2241 if (GET_CODE (subtarget) == REG)
2242 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2243 else
2244 subtarget = gen_reg_rtx (DImode), copyout = true;
2245
2246 seq = ((mode == QImode
2247 ? gen_unaligned_loadqi
2248 : gen_unaligned_loadhi)
2249 (subtarget, get_unaligned_address (operands[1]),
2250 temp1, temp2));
2251 alpha_set_memflags (seq, operands[1]);
2252 emit_insn (seq);
2253
2254 if (copyout)
2255 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2256 }
2257 return true;
2258 }
2259
2260 if (GET_CODE (operands[0]) == MEM
2261 || (GET_CODE (operands[0]) == SUBREG
2262 && GET_CODE (SUBREG_REG (operands[0])) == MEM)
2263 || (reload_in_progress && GET_CODE (operands[0]) == REG
2264 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
2265 || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
2266 && GET_CODE (SUBREG_REG (operands[0])) == REG
2267 && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
2268 {
2269 if (aligned_memory_operand (operands[0], mode))
2270 {
2271 rtx aligned_mem, bitnum;
2272 rtx temp1 = gen_reg_rtx (SImode);
2273 rtx temp2 = gen_reg_rtx (SImode);
2274
2275 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2276
2277 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2278 temp1, temp2));
2279 }
2280 else
2281 {
2282 rtx temp1 = gen_reg_rtx (DImode);
2283 rtx temp2 = gen_reg_rtx (DImode);
2284 rtx temp3 = gen_reg_rtx (DImode);
2285 rtx seq = ((mode == QImode
2286 ? gen_unaligned_storeqi
2287 : gen_unaligned_storehi)
2288 (get_unaligned_address (operands[0]),
2289 operands[1], temp1, temp2, temp3));
2290
2291 alpha_set_memflags (seq, operands[0]);
2292 emit_insn (seq);
2293 }
2294 return true;
2295 }
2296
2297 return false;
2298 }
2299
2300 /* Implement the movmisalign patterns. One of the operands is a memory
2301 that is not naturally aligned. Emit instructions to load it. */
2302
2303 void
2304 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2305 {
2306 /* Honor misaligned loads; we promised to handle them. */
2307 if (MEM_P (operands[1]))
2308 {
2309 rtx tmp;
2310
2311 if (register_operand (operands[0], mode))
2312 tmp = operands[0];
2313 else
2314 tmp = gen_reg_rtx (mode);
2315
2316 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2317 if (tmp != operands[0])
2318 emit_move_insn (operands[0], tmp);
2319 }
2320 else if (MEM_P (operands[0]))
2321 {
2322 if (!reg_or_0_operand (operands[1], mode))
2323 operands[1] = force_reg (mode, operands[1]);
2324 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2325 }
2326 else
2327 gcc_unreachable ();
2328 }
2329
2330 /* Generate an unsigned DImode to FP conversion. This is the same code
2331 optabs would emit if we didn't have TFmode patterns.
2332
2333 For SFmode, this is the only construction I've found that can pass
2334 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2335 intermediates will work, because you'll get intermediate rounding
2336 that ruins the end result. Some of this could be fixed by turning
2337 on round-to-positive-infinity, but that requires diddling the fpsr,
2338 which kills performance. I tried turning this around and converting
2339 to a negative number, so that I could turn on /m, but either I did
2340 it wrong or there's something else, because I wound up with the exact
2341 same single-bit error. There is a branch-less form of this same code:
2342
2343 srl $16,1,$1
2344 and $16,1,$2
2345 cmplt $16,0,$3
2346 or $1,$2,$2
2347 cmovge $16,$16,$2
2348 itoft $3,$f10
2349 itoft $2,$f11
2350 cvtqs $f11,$f11
2351 adds $f11,$f11,$f0
2352 fcmoveq $f10,$f11,$f0
2353
2354 I'm not using it because it's the same number of instructions as
2355 this branch-full form, and it has more serialized long latency
2356 instructions on the critical path.
2357
2358 For DFmode, we can avoid rounding errors by breaking up the word
2359 into two pieces, converting them separately, and adding them back:
2360
2361 LC0: .long 0,0x5f800000
2362
2363 itoft $16,$f11
2364 lda $2,LC0
2365 cmplt $16,0,$1
2366 cpyse $f11,$f31,$f10
2367 cpyse $f31,$f11,$f11
2368 s4addq $1,$2,$1
2369 lds $f12,0($1)
2370 cvtqt $f10,$f10
2371 cvtqt $f11,$f11
2372 addt $f12,$f10,$f0
2373 addt $f0,$f11,$f0
2374
2375 This doesn't seem to be a clear-cut win over the optabs form.
2376 It probably all depends on the distribution of numbers being
2377 converted -- in the optabs form, all but high-bit-set has a
2378 much lower minimum execution time. */
2379
2380 void
2381 alpha_emit_floatuns (rtx operands[2])
2382 {
2383 rtx neglab, donelab, i0, i1, f0, in, out;
2384 enum machine_mode mode;
2385
2386 out = operands[0];
2387 in = force_reg (DImode, operands[1]);
2388 mode = GET_MODE (out);
2389 neglab = gen_label_rtx ();
2390 donelab = gen_label_rtx ();
2391 i0 = gen_reg_rtx (DImode);
2392 i1 = gen_reg_rtx (DImode);
2393 f0 = gen_reg_rtx (mode);
2394
2395 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2396
2397 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2398 emit_jump_insn (gen_jump (donelab));
2399 emit_barrier ();
2400
2401 emit_label (neglab);
2402
2403 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2404 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2405 emit_insn (gen_iordi3 (i0, i0, i1));
2406 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2407 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2408
2409 emit_label (donelab);
2410 }
2411
2412 /* Generate the comparison for a conditional branch. */
2413
2414 rtx
2415 alpha_emit_conditional_branch (enum rtx_code code)
2416 {
2417 enum rtx_code cmp_code, branch_code;
2418 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2419 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2420 rtx tem;
2421
2422 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2423 {
2424 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2425 op1 = const0_rtx;
2426 alpha_compare.fp_p = 0;
2427 }
2428
2429 /* The general case: fold the comparison code to the types of compares
2430 that we have, choosing the branch as necessary. */
2431 switch (code)
2432 {
2433 case EQ: case LE: case LT: case LEU: case LTU:
2434 case UNORDERED:
2435 /* We have these compares: */
2436 cmp_code = code, branch_code = NE;
2437 break;
2438
2439 case NE:
2440 case ORDERED:
2441 /* These must be reversed. */
2442 cmp_code = reverse_condition (code), branch_code = EQ;
2443 break;
2444
2445 case GE: case GT: case GEU: case GTU:
2446 /* For FP, we swap them, for INT, we reverse them. */
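	/* For instance, an integer a > b becomes tmp = (a <= b) followed by a
	   branch on tmp == 0, while an FP a > b becomes tmp = (b < a)
	   followed by a branch on tmp != 0.  */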
2447 if (alpha_compare.fp_p)
2448 {
2449 cmp_code = swap_condition (code);
2450 branch_code = NE;
2451 tem = op0, op0 = op1, op1 = tem;
2452 }
2453 else
2454 {
2455 cmp_code = reverse_condition (code);
2456 branch_code = EQ;
2457 }
2458 break;
2459
2460 default:
2461 gcc_unreachable ();
2462 }
2463
2464 if (alpha_compare.fp_p)
2465 {
2466 cmp_mode = DFmode;
2467 if (flag_unsafe_math_optimizations)
2468 {
2469 /* When we are not as concerned about non-finite values, and we
2470 are comparing against zero, we can branch directly. */
2471 if (op1 == CONST0_RTX (DFmode))
2472 cmp_code = UNKNOWN, branch_code = code;
2473 else if (op0 == CONST0_RTX (DFmode))
2474 {
2475 /* Undo the swap we probably did just above. */
2476 tem = op0, op0 = op1, op1 = tem;
2477 branch_code = swap_condition (cmp_code);
2478 cmp_code = UNKNOWN;
2479 }
2480 }
2481 else
2482 {
2483 /* ??? We mark the branch mode to be CCmode to prevent the
2484 compare and branch from being combined, since the compare
2485 insn follows IEEE rules that the branch does not. */
2486 branch_mode = CCmode;
2487 }
2488 }
2489 else
2490 {
2491 cmp_mode = DImode;
2492
2493 /* The following optimizations are only for signed compares. */
2494 if (code != LEU && code != LTU && code != GEU && code != GTU)
2495 {
2496 /* Whee. Compare and branch against 0 directly. */
2497 if (op1 == const0_rtx)
2498 cmp_code = UNKNOWN, branch_code = code;
2499
2500 /* If the constant doesn't fit into an immediate, but can
2501 be generated by lda/ldah, we adjust the argument and
2502 compare against zero, so we can use beq/bne directly. */
2503 /* ??? Don't do this when comparing against symbols, otherwise
2504 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2505 be declared false out of hand (at least for non-weak). */
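	  /* For example, (x == 0x8000): 0x8000 is not an 8-bit immediate,
	     but -0x8000 fits an lda offset, so we test (x + -0x8000) == 0
	     with a single lda followed by beq/bne.  */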
2506 else if (GET_CODE (op1) == CONST_INT
2507 && (code == EQ || code == NE)
2508 && !(symbolic_operand (op0, VOIDmode)
2509 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2510 {
2511 rtx n_op1 = GEN_INT (-INTVAL (op1));
2512
2513 if (! satisfies_constraint_I (op1)
2514 && (satisfies_constraint_K (n_op1)
2515 || satisfies_constraint_L (n_op1)))
2516 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2517 }
2518 }
2519
2520 if (!reg_or_0_operand (op0, DImode))
2521 op0 = force_reg (DImode, op0);
2522 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2523 op1 = force_reg (DImode, op1);
2524 }
2525
2526 /* Emit an initial compare instruction, if necessary. */
2527 tem = op0;
2528 if (cmp_code != UNKNOWN)
2529 {
2530 tem = gen_reg_rtx (cmp_mode);
2531 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2532 }
2533
2534 /* Zero the operands. */
2535 memset (&alpha_compare, 0, sizeof (alpha_compare));
2536
2537 /* Return the branch comparison. */
2538 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2539 }
2540
2541 /* Certain simplifications can be done to make invalid setcc operations
2542 valid. Return the final comparison, or NULL if we can't make it work. */
2543
2544 rtx
2545 alpha_emit_setcc (enum rtx_code code)
2546 {
2547 enum rtx_code cmp_code;
2548 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2549 int fp_p = alpha_compare.fp_p;
2550 rtx tmp;
2551
2552 /* Zero the operands. */
2553 memset (&alpha_compare, 0, sizeof (alpha_compare));
2554
2555 if (fp_p && GET_MODE (op0) == TFmode)
2556 {
2557 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2558 op1 = const0_rtx;
2559 fp_p = 0;
2560 }
2561
2562 if (fp_p && !TARGET_FIX)
2563 return NULL_RTX;
2564
2565 /* The general case: fold the comparison code to the types of compares
2566 that we have, choosing the branch as necessary. */
2567
2568 cmp_code = UNKNOWN;
2569 switch (code)
2570 {
2571 case EQ: case LE: case LT: case LEU: case LTU:
2572 case UNORDERED:
2573 /* We have these compares. */
2574 if (fp_p)
2575 cmp_code = code, code = NE;
2576 break;
2577
2578 case NE:
2579 if (!fp_p && op1 == const0_rtx)
2580 break;
2581 /* FALLTHRU */
2582
2583 case ORDERED:
2584 cmp_code = reverse_condition (code);
2585 code = EQ;
2586 break;
2587
2588 case GE: case GT: case GEU: case GTU:
2589 /* These normally need swapping, but for integer zero we have
2590 special patterns that recognize swapped operands. */
2591 if (!fp_p && op1 == const0_rtx)
2592 break;
2593 code = swap_condition (code);
2594 if (fp_p)
2595 cmp_code = code, code = NE;
2596 tmp = op0, op0 = op1, op1 = tmp;
2597 break;
2598
2599 default:
2600 gcc_unreachable ();
2601 }
2602
2603 if (!fp_p)
2604 {
2605 if (!register_operand (op0, DImode))
2606 op0 = force_reg (DImode, op0);
2607 if (!reg_or_8bit_operand (op1, DImode))
2608 op1 = force_reg (DImode, op1);
2609 }
2610
2611 /* Emit an initial compare instruction, if necessary. */
2612 if (cmp_code != UNKNOWN)
2613 {
2614 enum machine_mode mode = fp_p ? DFmode : DImode;
2615
2616 tmp = gen_reg_rtx (mode);
2617 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2618 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2619
2620 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2621 op1 = const0_rtx;
2622 }
2623
2624 /* Return the setcc comparison. */
2625 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2626 }
2627
2628
2629 /* Rewrite a comparison against zero CMP of the form
2630 (CODE (cc0) (const_int 0)) so it can be written validly in
2631 a conditional move (if_then_else CMP ...).
2632 If both of the operands that set cc0 are nonzero we must emit
2633 an insn to perform the compare (it can't be done within
2634 the conditional move). */
2635
2636 rtx
2637 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2638 {
2639 enum rtx_code code = GET_CODE (cmp);
2640 enum rtx_code cmov_code = NE;
2641 rtx op0 = alpha_compare.op0;
2642 rtx op1 = alpha_compare.op1;
2643 int fp_p = alpha_compare.fp_p;
2644 enum machine_mode cmp_mode
2645 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2646 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2647 enum machine_mode cmov_mode = VOIDmode;
2648 int local_fast_math = flag_unsafe_math_optimizations;
2649 rtx tem;
2650
2651 /* Zero the operands. */
2652 memset (&alpha_compare, 0, sizeof (alpha_compare));
2653
2654 if (fp_p != FLOAT_MODE_P (mode))
2655 {
2656 enum rtx_code cmp_code;
2657
2658 if (! TARGET_FIX)
2659 return 0;
2660
2661 /* If we have fp<->int register move instructions, do a cmov by
2662 performing the comparison in fp registers, and move the
2663 zero/nonzero value to integer registers, where we can then
2664 use a normal cmov, or vice-versa. */
2665
2666 switch (code)
2667 {
2668 case EQ: case LE: case LT: case LEU: case LTU:
2669 /* We have these compares. */
2670 cmp_code = code, code = NE;
2671 break;
2672
2673 case NE:
2674 /* This must be reversed. */
2675 cmp_code = EQ, code = EQ;
2676 break;
2677
2678 case GE: case GT: case GEU: case GTU:
2679 /* These normally need swapping, but for integer zero we have
2680 special patterns that recognize swapped operands. */
2681 if (!fp_p && op1 == const0_rtx)
2682 cmp_code = code, code = NE;
2683 else
2684 {
2685 cmp_code = swap_condition (code);
2686 code = NE;
2687 tem = op0, op0 = op1, op1 = tem;
2688 }
2689 break;
2690
2691 default:
2692 gcc_unreachable ();
2693 }
2694
2695 tem = gen_reg_rtx (cmp_op_mode);
2696 emit_insn (gen_rtx_SET (VOIDmode, tem,
2697 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2698 op0, op1)));
2699
2700 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2701 op0 = gen_lowpart (cmp_op_mode, tem);
2702 op1 = CONST0_RTX (cmp_op_mode);
2703 fp_p = !fp_p;
2704 local_fast_math = 1;
2705 }
2706
2707 /* We may be able to use a conditional move directly.
2708 This avoids emitting spurious compares. */
2709 if (signed_comparison_operator (cmp, VOIDmode)
2710 && (!fp_p || local_fast_math)
2711 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2712 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2713
2714 /* We can't put the comparison inside the conditional move;
2715 emit a compare instruction and put that inside the
2716 conditional move. Make sure we emit only comparisons we have;
2717 swap or reverse as necessary. */
2718
2719 if (no_new_pseudos)
2720 return NULL_RTX;
2721
2722 switch (code)
2723 {
2724 case EQ: case LE: case LT: case LEU: case LTU:
2725 /* We have these compares: */
2726 break;
2727
2728 case NE:
2729 /* This must be reversed. */
2730 code = reverse_condition (code);
2731 cmov_code = EQ;
2732 break;
2733
2734 case GE: case GT: case GEU: case GTU:
2735 /* These must be swapped. */
2736 if (op1 != CONST0_RTX (cmp_mode))
2737 {
2738 code = swap_condition (code);
2739 tem = op0, op0 = op1, op1 = tem;
2740 }
2741 break;
2742
2743 default:
2744 gcc_unreachable ();
2745 }
2746
2747 if (!fp_p)
2748 {
2749 if (!reg_or_0_operand (op0, DImode))
2750 op0 = force_reg (DImode, op0);
2751 if (!reg_or_8bit_operand (op1, DImode))
2752 op1 = force_reg (DImode, op1);
2753 }
2754
2755 /* ??? We mark the branch mode to be CCmode to prevent the compare
2756 and cmov from being combined, since the compare insn follows IEEE
2757 rules that the cmov does not. */
2758 if (fp_p && !local_fast_math)
2759 cmov_mode = CCmode;
2760
2761 tem = gen_reg_rtx (cmp_op_mode);
2762 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2763 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2764 }
2765
2766 /* Simplify a conditional move of two constants into a setcc with
2767 arithmetic. This is done with a splitter since combine would
2768 just undo the work if done during code generation. It also catches
2769 cases we wouldn't have before cse. */
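/* For example, (cond ? 8 : 0) becomes tmp = (cond != 0); tmp << 3,
   and (cond ? 5 : 4) becomes tmp = (cond != 0); tmp + 4.  */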
2770
2771 int
2772 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2773 rtx t_rtx, rtx f_rtx)
2774 {
2775 HOST_WIDE_INT t, f, diff;
2776 enum machine_mode mode;
2777 rtx target, subtarget, tmp;
2778
2779 mode = GET_MODE (dest);
2780 t = INTVAL (t_rtx);
2781 f = INTVAL (f_rtx);
2782 diff = t - f;
2783
2784 if (((code == NE || code == EQ) && diff < 0)
2785 || (code == GE || code == GT))
2786 {
2787 code = reverse_condition (code);
2788 diff = t, t = f, f = diff;
2789 diff = t - f;
2790 }
2791
2792 subtarget = target = dest;
2793 if (mode != DImode)
2794 {
2795 target = gen_lowpart (DImode, dest);
2796 if (! no_new_pseudos)
2797 subtarget = gen_reg_rtx (DImode);
2798 else
2799 subtarget = target;
2800 }
2801 /* Below, we must be careful to use copy_rtx on target and subtarget
2802 in intermediate insns, as they may be a subreg rtx, which may not
2803 be shared. */
2804
2805 if (f == 0 && exact_log2 (diff) > 0
2806 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2807 viable over a longer latency cmove. On EV5, the E0 slot is a
2808 scarce resource, and on EV4 shift has the same latency as a cmove. */
2809 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2810 {
2811 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2812 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2813
2814 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2815 GEN_INT (exact_log2 (t)));
2816 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2817 }
2818 else if (f == 0 && t == -1)
2819 {
2820 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2821 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2822
2823 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2824 }
2825 else if (diff == 1 || diff == 4 || diff == 8)
2826 {
2827 rtx add_op;
2828
2829 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2830 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2831
2832 if (diff == 1)
2833 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2834 else
2835 {
2836 add_op = GEN_INT (f);
2837 if (sext_add_operand (add_op, mode))
2838 {
2839 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2840 GEN_INT (diff));
2841 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2842 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2843 }
2844 else
2845 return 0;
2846 }
2847 }
2848 else
2849 return 0;
2850
2851 return 1;
2852 }
2853 \f
2854 /* Look up the function X_floating library function name for the
2855 given operation. */
2856
2857 struct xfloating_op GTY(())
2858 {
2859 const enum rtx_code code;
2860 const char *const GTY((skip)) osf_func;
2861 const char *const GTY((skip)) vms_func;
2862 rtx libcall;
2863 };
2864
2865 static GTY(()) struct xfloating_op xfloating_ops[] =
2866 {
2867 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2868 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2869 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2870 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2871 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2872 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2873 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2874 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2875 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2876 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2877 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2878 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2879 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2880 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2881 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2882 };
2883
2884 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2885 {
2886 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2887 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2888 };
2889
2890 static rtx
2891 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2892 {
2893 struct xfloating_op *ops = xfloating_ops;
2894 long n = ARRAY_SIZE (xfloating_ops);
2895 long i;
2896
2897 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2898
2899 /* How irritating. Nothing to key off for the main table. */
2900 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2901 {
2902 ops = vax_cvt_ops;
2903 n = ARRAY_SIZE (vax_cvt_ops);
2904 }
2905
2906 for (i = 0; i < n; ++i, ++ops)
2907 if (ops->code == code)
2908 {
2909 rtx func = ops->libcall;
2910 if (!func)
2911 {
2912 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2913 ? ops->vms_func : ops->osf_func);
2914 ops->libcall = func;
2915 }
2916 return func;
2917 }
2918
2919 gcc_unreachable ();
2920 }
2921
2922 /* Most X_floating operations take the rounding mode as an argument.
2923 Compute that here. */
2924
2925 static int
2926 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2927 enum alpha_fp_rounding_mode round)
2928 {
2929 int mode;
2930
2931 switch (round)
2932 {
2933 case ALPHA_FPRM_NORM:
2934 mode = 2;
2935 break;
2936 case ALPHA_FPRM_MINF:
2937 mode = 1;
2938 break;
2939 case ALPHA_FPRM_CHOP:
2940 mode = 0;
2941 break;
2942 case ALPHA_FPRM_DYN:
2943 mode = 4;
2944 break;
2945 default:
2946 gcc_unreachable ();
2947
2948 /* XXX For reference, round to +inf is mode = 3. */
2949 }
2950
2951 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2952 mode |= 0x10000;
2953
2954 return mode;
2955 }
2956
2957 /* Emit an X_floating library function call.
2958
2959 Note that these functions do not follow normal calling conventions:
2960 TFmode arguments are passed in two integer registers (as opposed to
2961 indirect); TFmode return values appear in R16+R17.
2962
2963 FUNC is the function to call.
2964 TARGET is where the output belongs.
2965 OPERANDS are the inputs.
2966 NOPERANDS is the count of inputs.
2967 EQUIV is the expression equivalent for the function.
2968 */
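/* For instance, a TFmode add passes its two TFmode operands in $16:$17 and
   $18:$19, the DImode rounding-mode literal in $20, and the TFmode result
   comes back in $16:$17 (see the register assignment loop below).  */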
2969
2970 static void
2971 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2972 int noperands, rtx equiv)
2973 {
2974 rtx usage = NULL_RTX, tmp, reg;
2975 int regno = 16, i;
2976
2977 start_sequence ();
2978
2979 for (i = 0; i < noperands; ++i)
2980 {
2981 switch (GET_MODE (operands[i]))
2982 {
2983 case TFmode:
2984 reg = gen_rtx_REG (TFmode, regno);
2985 regno += 2;
2986 break;
2987
2988 case DFmode:
2989 reg = gen_rtx_REG (DFmode, regno + 32);
2990 regno += 1;
2991 break;
2992
2993 case VOIDmode:
2994 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2995 /* FALLTHRU */
2996 case DImode:
2997 reg = gen_rtx_REG (DImode, regno);
2998 regno += 1;
2999 break;
3000
3001 default:
3002 gcc_unreachable ();
3003 }
3004
3005 emit_move_insn (reg, operands[i]);
3006 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3007 }
3008
3009 switch (GET_MODE (target))
3010 {
3011 case TFmode:
3012 reg = gen_rtx_REG (TFmode, 16);
3013 break;
3014 case DFmode:
3015 reg = gen_rtx_REG (DFmode, 32);
3016 break;
3017 case DImode:
3018 reg = gen_rtx_REG (DImode, 0);
3019 break;
3020 default:
3021 gcc_unreachable ();
3022 }
3023
3024 tmp = gen_rtx_MEM (QImode, func);
3025 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3026 const0_rtx, const0_rtx));
3027 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3028 CONST_OR_PURE_CALL_P (tmp) = 1;
3029
3030 tmp = get_insns ();
3031 end_sequence ();
3032
3033 emit_libcall_block (tmp, target, reg, equiv);
3034 }
3035
3036 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3037
3038 void
3039 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3040 {
3041 rtx func;
3042 int mode;
3043 rtx out_operands[3];
3044
3045 func = alpha_lookup_xfloating_lib_func (code);
3046 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3047
3048 out_operands[0] = operands[1];
3049 out_operands[1] = operands[2];
3050 out_operands[2] = GEN_INT (mode);
3051 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3052 gen_rtx_fmt_ee (code, TFmode, operands[1],
3053 operands[2]));
3054 }
3055
3056 /* Emit an X_floating library function call for a comparison. */
3057
3058 static rtx
3059 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3060 {
3061 enum rtx_code cmp_code, res_code;
3062 rtx func, out, operands[2];
3063
3064 /* X_floating library comparison functions return
3065 -1 unordered
3066 0 false
3067 1 true
3068 Convert the compare against the raw return value. */
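  /* For example, LT calls the "less than" routine and then tests
     (result > 0), while UNORDERED calls the EQ routine and tests
     (result < 0).  */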
3069
3070 cmp_code = *pcode;
3071 switch (cmp_code)
3072 {
3073 case UNORDERED:
3074 cmp_code = EQ;
3075 res_code = LT;
3076 break;
3077 case ORDERED:
3078 cmp_code = EQ;
3079 res_code = GE;
3080 break;
3081 case NE:
3082 res_code = NE;
3083 break;
3084 case EQ:
3085 case LT:
3086 case GT:
3087 case LE:
3088 case GE:
3089 res_code = GT;
3090 break;
3091 default:
3092 gcc_unreachable ();
3093 }
3094 *pcode = res_code;
3095
3096 func = alpha_lookup_xfloating_lib_func (cmp_code);
3097
3098 operands[0] = op0;
3099 operands[1] = op1;
3100 out = gen_reg_rtx (DImode);
3101
3102 /* ??? Strange mode for equiv because what's actually returned
3103 is -1,0,1, not a proper boolean value. */
3104 alpha_emit_xfloating_libcall (func, out, operands, 2,
3105 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3106
3107 return out;
3108 }
3109
3110 /* Emit an X_floating library function call for a conversion. */
3111
3112 void
3113 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3114 {
3115 int noperands = 1, mode;
3116 rtx out_operands[2];
3117 rtx func;
3118 enum rtx_code code = orig_code;
3119
3120 if (code == UNSIGNED_FIX)
3121 code = FIX;
3122
3123 func = alpha_lookup_xfloating_lib_func (code);
3124
3125 out_operands[0] = operands[1];
3126
3127 switch (code)
3128 {
3129 case FIX:
3130 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3131 out_operands[1] = GEN_INT (mode);
3132 noperands = 2;
3133 break;
3134 case FLOAT_TRUNCATE:
3135 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3136 out_operands[1] = GEN_INT (mode);
3137 noperands = 2;
3138 break;
3139 default:
3140 break;
3141 }
3142
3143 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3144 gen_rtx_fmt_e (orig_code,
3145 GET_MODE (operands[0]),
3146 operands[1]));
3147 }
3148
3149 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3150 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3151 guarantee that the sequence
3152 set (OP[0] OP[2])
3153 set (OP[1] OP[3])
3154 is valid. Naturally, output operand ordering is little-endian.
3155 This is used by *movtf_internal and *movti_internal. */
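/* For example, copying a TImode value from the register pair {$1,$2} to
   {$2,$3}: emitting the low move $2=$1 first would clobber the high source,
   so with FIXUP_OVERLAP the two sets are swapped and the high half is
   stored first.  */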
3156
3157 void
3158 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3159 bool fixup_overlap)
3160 {
3161 switch (GET_CODE (operands[1]))
3162 {
3163 case REG:
3164 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3165 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3166 break;
3167
3168 case MEM:
3169 operands[3] = adjust_address (operands[1], DImode, 8);
3170 operands[2] = adjust_address (operands[1], DImode, 0);
3171 break;
3172
3173 case CONST_INT:
3174 case CONST_DOUBLE:
3175 gcc_assert (operands[1] == CONST0_RTX (mode));
3176 operands[2] = operands[3] = const0_rtx;
3177 break;
3178
3179 default:
3180 gcc_unreachable ();
3181 }
3182
3183 switch (GET_CODE (operands[0]))
3184 {
3185 case REG:
3186 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3187 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3188 break;
3189
3190 case MEM:
3191 operands[1] = adjust_address (operands[0], DImode, 8);
3192 operands[0] = adjust_address (operands[0], DImode, 0);
3193 break;
3194
3195 default:
3196 gcc_unreachable ();
3197 }
3198
3199 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3200 {
3201 rtx tmp;
3202 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3203 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3204 }
3205 }
3206
3207 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3208 op2 is a register containing the sign bit, operation is the
3209 logical operation to be performed. */
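/* For negation the operation flips the sign bit held in the high DImode
   half; for abs it clears that bit.  The low half is copied unchanged.  */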
3210
3211 void
3212 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3213 {
3214 rtx high_bit = operands[2];
3215 rtx scratch;
3216 int move;
3217
3218 alpha_split_tmode_pair (operands, TFmode, false);
3219
3220 /* Detect three flavors of operand overlap. */
3221 move = 1;
3222 if (rtx_equal_p (operands[0], operands[2]))
3223 move = 0;
3224 else if (rtx_equal_p (operands[1], operands[2]))
3225 {
3226 if (rtx_equal_p (operands[0], high_bit))
3227 move = 2;
3228 else
3229 move = -1;
3230 }
3231
3232 if (move < 0)
3233 emit_move_insn (operands[0], operands[2]);
3234
3235 /* ??? If the destination overlaps both source tf and high_bit, then
3236 assume source tf is dead in its entirety and use the other half
3237 for a scratch register. Otherwise "scratch" is just the proper
3238 destination register. */
3239 scratch = operands[move < 2 ? 1 : 3];
3240
3241 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3242
3243 if (move > 0)
3244 {
3245 emit_move_insn (operands[0], operands[2]);
3246 if (move > 1)
3247 emit_move_insn (operands[1], scratch);
3248 }
3249 }
3250 \f
3251 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3252 unaligned data:
3253
3254 unsigned: signed:
3255 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3256 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3257 lda r3,X(r11) lda r3,X+2(r11)
3258 extwl r1,r3,r1 extql r1,r3,r1
3259 extwh r2,r3,r2 extqh r2,r3,r2
3260 or r1,r2,r1 or r1,r2,r1
3261 sra r1,48,r1
3262
3263 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3264 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3265 lda r3,X(r11) lda r3,X(r11)
3266 extll r1,r3,r1 extll r1,r3,r1
3267 extlh r2,r3,r2 extlh r2,r3,r2
3268 or r1,r2,r1 addl r1,r2,r1
3269
3270 quad: ldq_u r1,X(r11)
3271 ldq_u r2,X+7(r11)
3272 lda r3,X(r11)
3273 extql r1,r3,r1
3274 extqh r2,r3,r2
3275 or r1,r2,r1
3276 */
3277
3278 void
3279 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3280 HOST_WIDE_INT ofs, int sign)
3281 {
3282 rtx meml, memh, addr, extl, exth, tmp, mema;
3283 enum machine_mode mode;
3284
3285 if (TARGET_BWX && size == 2)
3286 {
3287 meml = adjust_address (mem, QImode, ofs);
3288 memh = adjust_address (mem, QImode, ofs+1);
3289 if (BYTES_BIG_ENDIAN)
3290 tmp = meml, meml = memh, memh = tmp;
3291 extl = gen_reg_rtx (DImode);
3292 exth = gen_reg_rtx (DImode);
3293 emit_insn (gen_zero_extendqidi2 (extl, meml));
3294 emit_insn (gen_zero_extendqidi2 (exth, memh));
3295 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3296 NULL, 1, OPTAB_LIB_WIDEN);
3297 addr = expand_simple_binop (DImode, IOR, extl, exth,
3298 NULL, 1, OPTAB_LIB_WIDEN);
3299
3300 if (sign && GET_MODE (tgt) != HImode)
3301 {
3302 addr = gen_lowpart (HImode, addr);
3303 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3304 }
3305 else
3306 {
3307 if (GET_MODE (tgt) != DImode)
3308 addr = gen_lowpart (GET_MODE (tgt), addr);
3309 emit_move_insn (tgt, addr);
3310 }
3311 return;
3312 }
3313
3314 meml = gen_reg_rtx (DImode);
3315 memh = gen_reg_rtx (DImode);
3316 addr = gen_reg_rtx (DImode);
3317 extl = gen_reg_rtx (DImode);
3318 exth = gen_reg_rtx (DImode);
3319
3320 mema = XEXP (mem, 0);
3321 if (GET_CODE (mema) == LO_SUM)
3322 mema = force_reg (Pmode, mema);
3323
3324 /* AND addresses cannot be in any alias set, since they may implicitly
3325 alias surrounding code. Ideally we'd have some alias set that
3326 covered all types except those with alignment 8 or higher. */
3327
3328 tmp = change_address (mem, DImode,
3329 gen_rtx_AND (DImode,
3330 plus_constant (mema, ofs),
3331 GEN_INT (-8)));
3332 set_mem_alias_set (tmp, 0);
3333 emit_move_insn (meml, tmp);
3334
3335 tmp = change_address (mem, DImode,
3336 gen_rtx_AND (DImode,
3337 plus_constant (mema, ofs + size - 1),
3338 GEN_INT (-8)));
3339 set_mem_alias_set (tmp, 0);
3340 emit_move_insn (memh, tmp);
3341
3342 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3343 {
3344 emit_move_insn (addr, plus_constant (mema, -1));
3345
3346 emit_insn (gen_extqh_be (extl, meml, addr));
3347 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3348
3349 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3350 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3351 addr, 1, OPTAB_WIDEN);
3352 }
3353 else if (sign && size == 2)
3354 {
3355 emit_move_insn (addr, plus_constant (mema, ofs+2));
3356
3357 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3358 emit_insn (gen_extqh_le (exth, memh, addr));
3359
3360 /* We must use tgt here for the target. The alpha-vms port fails if we use
3361 addr for the target, because addr is marked as a pointer and combine
3362 knows that pointers are always sign-extended 32-bit values. */
3363 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3364 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3365 addr, 1, OPTAB_WIDEN);
3366 }
3367 else
3368 {
3369 if (WORDS_BIG_ENDIAN)
3370 {
3371 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3372 switch ((int) size)
3373 {
3374 case 2:
3375 emit_insn (gen_extwh_be (extl, meml, addr));
3376 mode = HImode;
3377 break;
3378
3379 case 4:
3380 emit_insn (gen_extlh_be (extl, meml, addr));
3381 mode = SImode;
3382 break;
3383
3384 case 8:
3385 emit_insn (gen_extqh_be (extl, meml, addr));
3386 mode = DImode;
3387 break;
3388
3389 default:
3390 gcc_unreachable ();
3391 }
3392 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3393 }
3394 else
3395 {
3396 emit_move_insn (addr, plus_constant (mema, ofs));
3397 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3398 switch ((int) size)
3399 {
3400 case 2:
3401 emit_insn (gen_extwh_le (exth, memh, addr));
3402 mode = HImode;
3403 break;
3404
3405 case 4:
3406 emit_insn (gen_extlh_le (exth, memh, addr));
3407 mode = SImode;
3408 break;
3409
3410 case 8:
3411 emit_insn (gen_extqh_le (exth, memh, addr));
3412 mode = DImode;
3413 break;
3414
3415 default:
3416 gcc_unreachable ();
3417 }
3418 }
3419
3420 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3421 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3422 sign, OPTAB_WIDEN);
3423 }
3424
3425 if (addr != tgt)
3426 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3427 }
3428
3429 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3430
3431 void
3432 alpha_expand_unaligned_store (rtx dst, rtx src,
3433 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3434 {
3435 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3436
3437 if (TARGET_BWX && size == 2)
3438 {
3439 if (src != const0_rtx)
3440 {
3441 dstl = gen_lowpart (QImode, src);
3442 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3443 NULL, 1, OPTAB_LIB_WIDEN);
3444 dsth = gen_lowpart (QImode, dsth);
3445 }
3446 else
3447 dstl = dsth = const0_rtx;
3448
3449 meml = adjust_address (dst, QImode, ofs);
3450 memh = adjust_address (dst, QImode, ofs+1);
3451 if (BYTES_BIG_ENDIAN)
3452 addr = meml, meml = memh, memh = addr;
3453
3454 emit_move_insn (meml, dstl);
3455 emit_move_insn (memh, dsth);
3456 return;
3457 }
3458
3459 dstl = gen_reg_rtx (DImode);
3460 dsth = gen_reg_rtx (DImode);
3461 insl = gen_reg_rtx (DImode);
3462 insh = gen_reg_rtx (DImode);
3463
3464 dsta = XEXP (dst, 0);
3465 if (GET_CODE (dsta) == LO_SUM)
3466 dsta = force_reg (Pmode, dsta);
3467
3468 /* AND addresses cannot be in any alias set, since they may implicitly
3469 alias surrounding code. Ideally we'd have some alias set that
3470 covered all types except those with alignment 8 or higher. */
3471
3472 meml = change_address (dst, DImode,
3473 gen_rtx_AND (DImode,
3474 plus_constant (dsta, ofs),
3475 GEN_INT (-8)));
3476 set_mem_alias_set (meml, 0);
3477
3478 memh = change_address (dst, DImode,
3479 gen_rtx_AND (DImode,
3480 plus_constant (dsta, ofs + size - 1),
3481 GEN_INT (-8)));
3482 set_mem_alias_set (memh, 0);
3483
3484 emit_move_insn (dsth, memh);
3485 emit_move_insn (dstl, meml);
3486 if (WORDS_BIG_ENDIAN)
3487 {
3488 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3489
3490 if (src != const0_rtx)
3491 {
3492 switch ((int) size)
3493 {
3494 case 2:
3495 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3496 break;
3497 case 4:
3498 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3499 break;
3500 case 8:
3501 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3502 break;
3503 }
3504 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3505 GEN_INT (size*8), addr));
3506 }
3507
3508 switch ((int) size)
3509 {
3510 case 2:
3511 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3512 break;
3513 case 4:
3514 {
3515 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3516 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3517 break;
3518 }
3519 case 8:
3520 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3521 break;
3522 }
3523
3524 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3525 }
3526 else
3527 {
3528 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3529
3530 if (src != CONST0_RTX (GET_MODE (src)))
3531 {
3532 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3533 GEN_INT (size*8), addr));
3534
3535 switch ((int) size)
3536 {
3537 case 2:
3538 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3539 break;
3540 case 4:
3541 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3542 break;
3543 case 8:
3544 emit_insn (gen_insql_le (insl, src, addr));
3545 break;
3546 }
3547 }
3548
3549 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3550
3551 switch ((int) size)
3552 {
3553 case 2:
3554 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3555 break;
3556 case 4:
3557 {
3558 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3559 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3560 break;
3561 }
3562 case 8:
3563 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3564 break;
3565 }
3566 }
3567
3568 if (src != CONST0_RTX (GET_MODE (src)))
3569 {
3570 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3571 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3572 }
3573
3574 if (WORDS_BIG_ENDIAN)
3575 {
3576 emit_move_insn (meml, dstl);
3577 emit_move_insn (memh, dsth);
3578 }
3579 else
3580 {
3581 /* Must store high before low for the degenerate case of aligned addresses. */
3582 emit_move_insn (memh, dsth);
3583 emit_move_insn (meml, dstl);
3584 }
3585 }
3586
3587 /* The block move code tries to maximize speed by separating loads and
3588 stores at the expense of register pressure: we load all of the data
3589 before we store it back out. There are two secondary effects worth
3590 mentioning: it speeds copying to/from aligned and unaligned
3591 buffers, and it makes the code significantly easier to write. */
3592
3593 #define MAX_MOVE_WORDS 8
3594
3595 /* Load an integral number of consecutive unaligned quadwords. */
3596
3597 static void
3598 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3599 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3600 {
3601 rtx const im8 = GEN_INT (-8);
3602 rtx const i64 = GEN_INT (64);
3603 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3604 rtx sreg, areg, tmp, smema;
3605 HOST_WIDE_INT i;
3606
3607 smema = XEXP (smem, 0);
3608 if (GET_CODE (smema) == LO_SUM)
3609 smema = force_reg (Pmode, smema);
3610
3611 /* Generate all the tmp registers we need. */
3612 for (i = 0; i < words; ++i)
3613 {
3614 data_regs[i] = out_regs[i];
3615 ext_tmps[i] = gen_reg_rtx (DImode);
3616 }
3617 data_regs[words] = gen_reg_rtx (DImode);
3618
3619 if (ofs != 0)
3620 smem = adjust_address (smem, GET_MODE (smem), ofs);
3621
3622 /* Load up all of the source data. */
3623 for (i = 0; i < words; ++i)
3624 {
3625 tmp = change_address (smem, DImode,
3626 gen_rtx_AND (DImode,
3627 plus_constant (smema, 8*i),
3628 im8));
3629 set_mem_alias_set (tmp, 0);
3630 emit_move_insn (data_regs[i], tmp);
3631 }
3632
3633 tmp = change_address (smem, DImode,
3634 gen_rtx_AND (DImode,
3635 plus_constant (smema, 8*words - 1),
3636 im8));
3637 set_mem_alias_set (tmp, 0);
3638 emit_move_insn (data_regs[words], tmp);
3639
3640 /* Extract the half-word fragments. Unfortunately DEC decided to make
3641 extxh with offset zero a noop instead of zeroing the register, so
3642 we must take care of that edge condition ourselves with cmov. */
3643
3644 sreg = copy_addr_to_reg (smema);
3645 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3646 1, OPTAB_WIDEN);
3647 if (WORDS_BIG_ENDIAN)
3648 emit_move_insn (sreg, plus_constant (sreg, 7));
3649 for (i = 0; i < words; ++i)
3650 {
3651 if (WORDS_BIG_ENDIAN)
3652 {
3653 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3654 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3655 }
3656 else
3657 {
3658 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3659 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3660 }
3661 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3662 gen_rtx_IF_THEN_ELSE (DImode,
3663 gen_rtx_EQ (DImode, areg,
3664 const0_rtx),
3665 const0_rtx, ext_tmps[i])));
3666 }
3667
3668 /* Merge the half-words into whole words. */
3669 for (i = 0; i < words; ++i)
3670 {
3671 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3672 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3673 }
3674 }
3675
3676 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3677 may be NULL to store zeros. */
3678
3679 static void
3680 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3681 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3682 {
3683 rtx const im8 = GEN_INT (-8);
3684 rtx const i64 = GEN_INT (64);
3685 rtx ins_tmps[MAX_MOVE_WORDS];
3686 rtx st_tmp_1, st_tmp_2, dreg;
3687 rtx st_addr_1, st_addr_2, dmema;
3688 HOST_WIDE_INT i;
3689
3690 dmema = XEXP (dmem, 0);
3691 if (GET_CODE (dmema) == LO_SUM)
3692 dmema = force_reg (Pmode, dmema);
3693
3694 /* Generate all the tmp registers we need. */
3695 if (data_regs != NULL)
3696 for (i = 0; i < words; ++i)
3697 ins_tmps[i] = gen_reg_rtx(DImode);
3698 st_tmp_1 = gen_reg_rtx(DImode);
3699 st_tmp_2 = gen_reg_rtx(DImode);
3700
3701 if (ofs != 0)
3702 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3703
3704 st_addr_2 = change_address (dmem, DImode,
3705 gen_rtx_AND (DImode,
3706 plus_constant (dmema, words*8 - 1),
3707 im8));
3708 set_mem_alias_set (st_addr_2, 0);
3709
3710 st_addr_1 = change_address (dmem, DImode,
3711 gen_rtx_AND (DImode, dmema, im8));
3712 set_mem_alias_set (st_addr_1, 0);
3713
3714 /* Load up the destination end bits. */
3715 emit_move_insn (st_tmp_2, st_addr_2);
3716 emit_move_insn (st_tmp_1, st_addr_1);
3717
3718 /* Shift the input data into place. */
3719 dreg = copy_addr_to_reg (dmema);
3720 if (WORDS_BIG_ENDIAN)
3721 emit_move_insn (dreg, plus_constant (dreg, 7));
3722 if (data_regs != NULL)
3723 {
3724 for (i = words-1; i >= 0; --i)
3725 {
3726 if (WORDS_BIG_ENDIAN)
3727 {
3728 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3729 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3730 }
3731 else
3732 {
3733 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3734 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3735 }
3736 }
3737 for (i = words-1; i > 0; --i)
3738 {
3739 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3740 ins_tmps[i-1], ins_tmps[i-1], 1,
3741 OPTAB_WIDEN);
3742 }
3743 }
3744
3745 /* Split and merge the ends with the destination data. */
3746 if (WORDS_BIG_ENDIAN)
3747 {
3748 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3749 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3750 }
3751 else
3752 {
3753 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3754 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3755 }
3756
3757 if (data_regs != NULL)
3758 {
3759 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3760 st_tmp_2, 1, OPTAB_WIDEN);
3761 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3762 st_tmp_1, 1, OPTAB_WIDEN);
3763 }
3764
3765 /* Store it all. */
3766 if (WORDS_BIG_ENDIAN)
3767 emit_move_insn (st_addr_1, st_tmp_1);
3768 else
3769 emit_move_insn (st_addr_2, st_tmp_2);
3770 for (i = words-1; i > 0; --i)
3771 {
3772 rtx tmp = change_address (dmem, DImode,
3773 gen_rtx_AND (DImode,
3774 plus_constant(dmema,
3775 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3776 im8));
3777 set_mem_alias_set (tmp, 0);
3778 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3779 }
3780 if (WORDS_BIG_ENDIAN)
3781 emit_move_insn (st_addr_2, st_tmp_2);
3782 else
3783 emit_move_insn (st_addr_1, st_tmp_1);
3784 }
3785
3786
3787 /* Expand string/block move operations.
3788
3789 operands[0] is the pointer to the destination.
3790 operands[1] is the pointer to the source.
3791 operands[2] is the number of bytes to move.
3792 operands[3] is the alignment. */
3793
3794 int
3795 alpha_expand_block_move (rtx operands[])
3796 {
3797 rtx bytes_rtx = operands[2];
3798 rtx align_rtx = operands[3];
3799 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3800 HOST_WIDE_INT bytes = orig_bytes;
3801 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3802 HOST_WIDE_INT dst_align = src_align;
3803 rtx orig_src = operands[1];
3804 rtx orig_dst = operands[0];
3805 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3806 rtx tmp;
3807 unsigned int i, words, ofs, nregs = 0;
3808
3809 if (orig_bytes <= 0)
3810 return 1;
3811 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3812 return 0;
3813
3814 /* Look for additional alignment information from recorded register info. */
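   /* For example, if the source address is reg+12 and the register is known
      to be 64-bit aligned, the offset is a multiple of 4 but not of 8, so
      the copy proceeds with 32-bit source alignment.  */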
3815
3816 tmp = XEXP (orig_src, 0);
3817 if (GET_CODE (tmp) == REG)
3818 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3819 else if (GET_CODE (tmp) == PLUS
3820 && GET_CODE (XEXP (tmp, 0)) == REG
3821 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3822 {
3823 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3824 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3825
3826 if (a > src_align)
3827 {
3828 if (a >= 64 && c % 8 == 0)
3829 src_align = 64;
3830 else if (a >= 32 && c % 4 == 0)
3831 src_align = 32;
3832 else if (a >= 16 && c % 2 == 0)
3833 src_align = 16;
3834 }
3835 }
3836
3837 tmp = XEXP (orig_dst, 0);
3838 if (GET_CODE (tmp) == REG)
3839 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3840 else if (GET_CODE (tmp) == PLUS
3841 && GET_CODE (XEXP (tmp, 0)) == REG
3842 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3843 {
3844 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3845 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3846
3847 if (a > dst_align)
3848 {
3849 if (a >= 64 && c % 8 == 0)
3850 dst_align = 64;
3851 else if (a >= 32 && c % 4 == 0)
3852 dst_align = 32;
3853 else if (a >= 16 && c % 2 == 0)
3854 dst_align = 16;
3855 }
3856 }
3857
3858 ofs = 0;
3859 if (src_align >= 64 && bytes >= 8)
3860 {
3861 words = bytes / 8;
3862
3863 for (i = 0; i < words; ++i)
3864 data_regs[nregs + i] = gen_reg_rtx (DImode);
3865
3866 for (i = 0; i < words; ++i)
3867 emit_move_insn (data_regs[nregs + i],
3868 adjust_address (orig_src, DImode, ofs + i * 8));
3869
3870 nregs += words;
3871 bytes -= words * 8;
3872 ofs += words * 8;
3873 }
3874
3875 if (src_align >= 32 && bytes >= 4)
3876 {
3877 words = bytes / 4;
3878
3879 for (i = 0; i < words; ++i)
3880 data_regs[nregs + i] = gen_reg_rtx (SImode);
3881
3882 for (i = 0; i < words; ++i)
3883 emit_move_insn (data_regs[nregs + i],
3884 adjust_address (orig_src, SImode, ofs + i * 4));
3885
3886 nregs += words;
3887 bytes -= words * 4;
3888 ofs += words * 4;
3889 }
3890
3891 if (bytes >= 8)
3892 {
3893 words = bytes / 8;
3894
3895 for (i = 0; i < words+1; ++i)
3896 data_regs[nregs + i] = gen_reg_rtx (DImode);
3897
3898 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3899 words, ofs);
3900
3901 nregs += words;
3902 bytes -= words * 8;
3903 ofs += words * 8;
3904 }
3905
3906 if (! TARGET_BWX && bytes >= 4)
3907 {
3908 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3909 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3910 bytes -= 4;
3911 ofs += 4;
3912 }
3913
3914 if (bytes >= 2)
3915 {
3916 if (src_align >= 16)
3917 {
3918 do {
3919 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3921 bytes -= 2;
3922 ofs += 2;
3923 } while (bytes >= 2);
3924 }
3925 else if (! TARGET_BWX)
3926 {
3927 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3928 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3929 bytes -= 2;
3930 ofs += 2;
3931 }
3932 }
3933
3934 while (bytes > 0)
3935 {
3936 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3937 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3938 bytes -= 1;
3939 ofs += 1;
3940 }
3941
3942 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3943
3944 /* Now save it back out again. */
3945
3946 i = 0, ofs = 0;
3947
3948 /* Write out the data in whatever chunks reading the source allowed. */
3949 if (dst_align >= 64)
3950 {
3951 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3952 {
3953 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3954 data_regs[i]);
3955 ofs += 8;
3956 i++;
3957 }
3958 }
3959
3960 if (dst_align >= 32)
3961 {
3962 /* If the source has remaining DImode regs, write them out in
3963 two pieces. */
3964 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3965 {
3966 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3967 NULL_RTX, 1, OPTAB_WIDEN);
3968
3969 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3970 gen_lowpart (SImode, data_regs[i]));
3971 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3972 gen_lowpart (SImode, tmp));
3973 ofs += 8;
3974 i++;
3975 }
3976
3977 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3978 {
3979 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3980 data_regs[i]);
3981 ofs += 4;
3982 i++;
3983 }
3984 }
3985
3986 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3987 {
3988 /* Write out a remaining block of words using unaligned methods. */
3989
3990 for (words = 1; i + words < nregs; words++)
3991 if (GET_MODE (data_regs[i + words]) != DImode)
3992 break;
3993
3994 if (words == 1)
3995 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3996 else
3997 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3998 words, ofs);
3999
4000 i += words;
4001 ofs += words * 8;
4002 }
4003
4004 /* Due to the above, this won't be aligned. */
4005 /* ??? If we have more than one of these, consider constructing full
4006 words in registers and using alpha_expand_unaligned_store_words. */
4007 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4008 {
4009 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4010 ofs += 4;
4011 i++;
4012 }
4013
4014 if (dst_align >= 16)
4015 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4016 {
4017 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4018 i++;
4019 ofs += 2;
4020 }
4021 else
4022 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4023 {
4024 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4025 i++;
4026 ofs += 2;
4027 }
4028
4029 /* The remainder must be byte copies. */
4030 while (i < nregs)
4031 {
4032 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4033 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4034 i++;
4035 ofs += 1;
4036 }
4037
4038 return 1;
4039 }
4040
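/* Expand a block clear: operands[0] is the destination, operands[1] is
   the number of bytes to clear, operands[3] is the alignment.  Return 1
   if the clear was expanded inline, 0 to fall back to a library call.  */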
4041 int
4042 alpha_expand_block_clear (rtx operands[])
4043 {
4044 rtx bytes_rtx = operands[1];
4045 rtx align_rtx = operands[3];
4046 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4047 HOST_WIDE_INT bytes = orig_bytes;
4048 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4049 HOST_WIDE_INT alignofs = 0;
4050 rtx orig_dst = operands[0];
4051 rtx tmp;
4052 int i, words, ofs = 0;
4053
4054 if (orig_bytes <= 0)
4055 return 1;
4056 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4057 return 0;
4058
4059 /* Look for stricter alignment. */
4060 tmp = XEXP (orig_dst, 0);
4061 if (GET_CODE (tmp) == REG)
4062 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4063 else if (GET_CODE (tmp) == PLUS
4064 && GET_CODE (XEXP (tmp, 0)) == REG
4065 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4066 {
4067 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4068 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4069
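      /* ALIGNOFS counts the bytes between the start of the block and the
	 next boundary of the alignment we just detected.  */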
4070 if (a > align)
4071 {
4072 if (a >= 64)
4073 align = a, alignofs = 8 - c % 8;
4074 else if (a >= 32)
4075 align = a, alignofs = 4 - c % 4;
4076 else if (a >= 16)
4077 align = a, alignofs = 2 - c % 2;
4078 }
4079 }
4080
4081 /* Handle an unaligned prefix first. */
4082
4083 if (alignofs > 0)
4084 {
4085 #if HOST_BITS_PER_WIDE_INT >= 64
4086 /* Given that alignofs is bounded by align, the only time BWX could
4087 generate three stores is for a 7 byte fill. Prefer two individual
4088 stores over a load/mask/store sequence. */
4089 if ((!TARGET_BWX || alignofs == 7)
4090 && align >= 32
4091 && !(alignofs == 4 && bytes >= 4))
4092 {
4093 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4094 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4095 rtx mem, tmp;
4096 HOST_WIDE_INT mask;
4097
4098 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4099 set_mem_alias_set (mem, 0);
4100
4101 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4102 if (bytes < alignofs)
4103 {
4104 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4105 ofs += bytes;
4106 bytes = 0;
4107 }
4108 else
4109 {
4110 bytes -= alignofs;
4111 ofs += alignofs;
4112 }
4113 alignofs = 0;
4114
4115 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4116 NULL_RTX, 1, OPTAB_WIDEN);
4117
4118 emit_move_insn (mem, tmp);
4119 }
4120 #endif
4121
4122 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4123 {
4124 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4125 bytes -= 1;
4126 ofs += 1;
4127 alignofs -= 1;
4128 }
4129 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4130 {
4131 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4132 bytes -= 2;
4133 ofs += 2;
4134 alignofs -= 2;
4135 }
4136 if (alignofs == 4 && bytes >= 4)
4137 {
4138 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4139 bytes -= 4;
4140 ofs += 4;
4141 alignofs = 0;
4142 }
4143
4144 /* If we've not used the extra lead alignment information by now,
4145 we won't be able to. Downgrade align to match what's left over. */
4146 if (alignofs > 0)
4147 {
4148 alignofs = alignofs & -alignofs;
4149 align = MIN (align, alignofs * BITS_PER_UNIT);
4150 }
4151 }
4152
4153 /* Handle a block of contiguous long-words. */
4154
4155 if (align >= 64 && bytes >= 8)
4156 {
4157 words = bytes / 8;
4158
4159 for (i = 0; i < words; ++i)
4160 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4161 const0_rtx);
4162
4163 bytes -= words * 8;
4164 ofs += words * 8;
4165 }
4166
4167 /* If the block is large and appropriately aligned, emit a single
4168 store followed by a sequence of stq_u insns. */
4169
4170 if (align >= 32 && bytes > 16)
4171 {
4172 rtx orig_dsta;
4173
4174 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4175 bytes -= 4;
4176 ofs += 4;
4177
4178 orig_dsta = XEXP (orig_dst, 0);
4179 if (GET_CODE (orig_dsta) == LO_SUM)
4180 orig_dsta = force_reg (Pmode, orig_dsta);
4181
4182 words = bytes / 8;
4183 for (i = 0; i < words; ++i)
4184 {
4185 rtx mem
4186 = change_address (orig_dst, DImode,
4187 gen_rtx_AND (DImode,
4188 plus_constant (orig_dsta, ofs + i*8),
4189 GEN_INT (-8)));
4190 set_mem_alias_set (mem, 0);
4191 emit_move_insn (mem, const0_rtx);
4192 }
4193
4194 /* Depending on the alignment, the first stq_u may have overlapped
4195 with the initial stl, which means that the last stq_u didn't
4196 write as much as it would appear. Leave those questionable bytes
4197 unaccounted for. */
4198 bytes -= words * 8 - 4;
4199 ofs += words * 8 - 4;
4200 }
4201
4202 /* Handle a smaller block of aligned words. */
4203
4204 if ((align >= 64 && bytes == 4)
4205 || (align == 32 && bytes >= 4))
4206 {
4207 words = bytes / 4;
4208
4209 for (i = 0; i < words; ++i)
4210 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4211 const0_rtx);
4212
4213 bytes -= words * 4;
4214 ofs += words * 4;
4215 }
4216
4217 /* An unaligned block uses stq_u stores for as many whole words as possible. */
4218
4219 if (bytes >= 8)
4220 {
4221 words = bytes / 8;
4222
4223 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4224
4225 bytes -= words * 8;
4226 ofs += words * 8;
4227 }
4228
4229 /* Next clean up any trailing pieces. */
4230
4231 #if HOST_BITS_PER_WIDE_INT >= 64
4232 /* Count the number of bits in BYTES for which aligned stores could
4233 be emitted. */
4234 words = 0;
4235 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4236 if (bytes & i)
4237 words += 1;
4238
4239 /* If we have appropriate alignment (and it wouldn't take too many
4240 instructions otherwise), mask out the bytes we need. */
4241 if (TARGET_BWX ? words > 2 : bytes > 0)
4242 {
4243 if (align >= 64)
4244 {
4245 rtx mem, tmp;
4246 HOST_WIDE_INT mask;
4247
4248 mem = adjust_address (orig_dst, DImode, ofs);
4249 set_mem_alias_set (mem, 0);
4250
4251 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4252
4253 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4254 NULL_RTX, 1, OPTAB_WIDEN);
4255
4256 emit_move_insn (mem, tmp);
4257 return 1;
4258 }
4259 else if (align >= 32 && bytes < 4)
4260 {
4261 rtx mem, tmp;
4262 HOST_WIDE_INT mask;
4263
4264 mem = adjust_address (orig_dst, SImode, ofs);
4265 set_mem_alias_set (mem, 0);
4266
4267 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4268
4269 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4270 NULL_RTX, 1, OPTAB_WIDEN);
4271
4272 emit_move_insn (mem, tmp);
4273 return 1;
4274 }
4275 }
4276 #endif
4277
4278 if (!TARGET_BWX && bytes >= 4)
4279 {
4280 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4281 bytes -= 4;
4282 ofs += 4;
4283 }
4284
4285 if (bytes >= 2)
4286 {
4287 if (align >= 16)
4288 {
4289 do {
4290 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4291 const0_rtx);
4292 bytes -= 2;
4293 ofs += 2;
4294 } while (bytes >= 2);
4295 }
4296 else if (! TARGET_BWX)
4297 {
4298 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4299 bytes -= 2;
4300 ofs += 2;
4301 }
4302 }
4303
4304 while (bytes > 0)
4305 {
4306 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4307 bytes -= 1;
4308 ofs += 1;
4309 }
4310
4311 return 1;
4312 }
4313
4314 /* Returns a mask so that zap(x, value) == x & mask. */
4315
4316 rtx
4317 alpha_expand_zap_mask (HOST_WIDE_INT value)
4318 {
4319 rtx result;
4320 int i;
4321
4322 if (HOST_BITS_PER_WIDE_INT >= 64)
4323 {
4324 HOST_WIDE_INT mask = 0;
4325
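      /* Byte I of the mask is 0xff when bit I of VALUE is clear; e.g.
	 VALUE == 0x01 yields 0xffffffffffffff00, clearing only the low byte.  */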
4326 for (i = 7; i >= 0; --i)
4327 {
4328 mask <<= 8;
4329 if (!((value >> i) & 1))
4330 mask |= 0xff;
4331 }
4332
4333 result = gen_int_mode (mask, DImode);
4334 }
4335 else
4336 {
4337 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4338
4339 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4340
4341 for (i = 7; i >= 4; --i)
4342 {
4343 mask_hi <<= 8;
4344 if (!((value >> i) & 1))
4345 mask_hi |= 0xff;
4346 }
4347
4348 for (i = 3; i >= 0; --i)
4349 {
4350 mask_lo <<= 8;
4351 if (!((value >> i) & 1))
4352 mask_lo |= 0xff;
4353 }
4354
4355 result = immed_double_const (mask_lo, mask_hi, DImode);
4356 }
4357
4358 return result;
4359 }
4360
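/* Emit the vector instruction GEN with destination OP0 and sources OP1
   and OP2, converting each operand to MODE first (const0_rtx becomes
   the zero vector of MODE).  */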
4361 void
4362 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4363 enum machine_mode mode,
4364 rtx op0, rtx op1, rtx op2)
4365 {
4366 op0 = gen_lowpart (mode, op0);
4367
4368 if (op1 == const0_rtx)
4369 op1 = CONST0_RTX (mode);
4370 else
4371 op1 = gen_lowpart (mode, op1);
4372
4373 if (op2 == const0_rtx)
4374 op2 = CONST0_RTX (mode);
4375 else
4376 op2 = gen_lowpart (mode, op2);
4377
4378 emit_insn ((*gen) (op0, op1, op2));
4379 }
4380
4381 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4382 COND is true. Mark the jump as unlikely to be taken. */
4383
4384 static void
4385 emit_unlikely_jump (rtx cond, rtx label)
4386 {
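  /* A branch probability just under 1% of REG_BR_PROB_BASE.  */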
4387 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4388 rtx x;
4389
4390 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4391 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4392 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4393 }
4394
4395 /* A subroutine of the atomic operation splitters. Emit a load-locked
4396 instruction in MODE. */
4397
4398 static void
4399 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4400 {
4401 rtx (*fn) (rtx, rtx) = NULL;
4402 if (mode == SImode)
4403 fn = gen_load_locked_si;
4404 else if (mode == DImode)
4405 fn = gen_load_locked_di;
4406 emit_insn (fn (reg, mem));
4407 }
4408
4409 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4410 instruction in MODE. */
4411
4412 static void
4413 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4414 {
4415 rtx (*fn) (rtx, rtx, rtx) = NULL;
4416 if (mode == SImode)
4417 fn = gen_store_conditional_si;
4418 else if (mode == DImode)
4419 fn = gen_store_conditional_di;
4420 emit_insn (fn (res, mem, val));
4421 }
4422
4423 /* A subroutine of the atomic operation splitters. Emit an insxl
4424 instruction in MODE. */
4425
4426 static rtx
4427 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4428 {
4429 rtx ret = gen_reg_rtx (DImode);
4430 rtx (*fn) (rtx, rtx, rtx);
4431
4432 if (WORDS_BIG_ENDIAN)
4433 {
4434 if (mode == QImode)
4435 fn = gen_insbl_be;
4436 else
4437 fn = gen_inswl_be;
4438 }
4439 else
4440 {
4441 if (mode == QImode)
4442 fn = gen_insbl_le;
4443 else
4444 fn = gen_inswl_le;
4445 }
4446 /* The insbl and inswl patterns require a register operand. */
4447 op1 = force_reg (mode, op1);
4448 emit_insn (fn (ret, op1, op2));
4449
4450 return ret;
4451 }
4452
4453 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4454 to perform. MEM is the memory on which to operate. VAL is the second
4455 operand of the binary operator. BEFORE and AFTER are optional locations to
4456 return the value of MEM either before or after the operation. SCRATCH is
4457 a scratch register. */
4458
4459 void
4460 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4461 rtx before, rtx after, rtx scratch)
4462 {
4463 enum machine_mode mode = GET_MODE (mem);
4464 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4465
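  /* The emitted sequence is: mb; retry: load-locked; <operation>;
     store-conditional; branch back to retry if the store failed; mb.  */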
4466 emit_insn (gen_memory_barrier ());
4467
4468 label = gen_label_rtx ();
4469 emit_label (label);
4470 label = gen_rtx_LABEL_REF (DImode, label);
4471
4472 if (before == NULL)
4473 before = scratch;
4474 emit_load_locked (mode, before, mem);
4475
4476 if (code == NOT)
4477 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4478 else
4479 x = gen_rtx_fmt_ee (code, mode, before, val);
4480 if (after)
4481 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4482 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4483
4484 emit_store_conditional (mode, cond, mem, scratch);
4485
4486 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4487 emit_unlikely_jump (x, label);
4488
4489 emit_insn (gen_memory_barrier ());
4490 }
4491
4492 /* Expand a compare and swap operation. */
4493
4494 void
4495 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4496 rtx scratch)
4497 {
4498 enum machine_mode mode = GET_MODE (mem);
4499 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4500
4501 emit_insn (gen_memory_barrier ());
4502
4503 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4504 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4505 emit_label (XEXP (label1, 0));
4506
4507 emit_load_locked (mode, retval, mem);
4508
4509 x = gen_lowpart (DImode, retval);
4510 if (oldval == const0_rtx)
4511 x = gen_rtx_NE (DImode, x, const0_rtx);
4512 else
4513 {
4514 x = gen_rtx_EQ (DImode, x, oldval);
4515 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4516 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4517 }
4518 emit_unlikely_jump (x, label2);
4519
4520 emit_move_insn (scratch, newval);
4521 emit_store_conditional (mode, cond, mem, scratch);
4522
4523 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4524 emit_unlikely_jump (x, label1);
4525
4526 emit_insn (gen_memory_barrier ());
4527 emit_label (XEXP (label2, 0));
4528 }
4529
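/* Expand a compare-and-swap of a QImode or HImode value.  The operation
   is performed on the aligned DImode word containing the value, via the
   sync_compare_and_swapqi_1/hi_1 patterns.  */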
4530 void
4531 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4532 {
4533 enum machine_mode mode = GET_MODE (mem);
4534 rtx addr, align, wdst;
4535 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4536
4537 addr = force_reg (DImode, XEXP (mem, 0));
4538 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4539 NULL_RTX, 1, OPTAB_DIRECT);
4540
4541 oldval = convert_modes (DImode, mode, oldval, 1);
4542 newval = emit_insxl (mode, newval, addr);
4543
4544 wdst = gen_reg_rtx (DImode);
4545 if (mode == QImode)
4546 fn5 = gen_sync_compare_and_swapqi_1;
4547 else
4548 fn5 = gen_sync_compare_and_swaphi_1;
4549 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4550
4551 emit_move_insn (dst, gen_lowpart (mode, wdst));
4552 }
4553
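/* Split a QImode/HImode compare-and-swap: load-lock the containing
   quadword, extract and compare the old value, and if it matches, mask
   in the new value and store-conditional the quadword back, retrying
   when the store-conditional fails.  */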
4554 void
4555 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4556 rtx oldval, rtx newval, rtx align,
4557 rtx scratch, rtx cond)
4558 {
4559 rtx label1, label2, mem, width, mask, x;
4560
4561 mem = gen_rtx_MEM (DImode, align);
4562 MEM_VOLATILE_P (mem) = 1;
4563
4564 emit_insn (gen_memory_barrier ());
4565 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4566 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4567 emit_label (XEXP (label1, 0));
4568
4569 emit_load_locked (DImode, scratch, mem);
4570
4571 width = GEN_INT (GET_MODE_BITSIZE (mode));
4572 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4573 if (WORDS_BIG_ENDIAN)
4574 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4575 else
4576 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4577
4578 if (oldval == const0_rtx)
4579 x = gen_rtx_NE (DImode, dest, const0_rtx);
4580 else
4581 {
4582 x = gen_rtx_EQ (DImode, dest, oldval);
4583 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4584 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4585 }
4586 emit_unlikely_jump (x, label2);
4587
4588 if (WORDS_BIG_ENDIAN)
4589 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4590 else
4591 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4592 emit_insn (gen_iordi3 (scratch, scratch, newval));
4593
4594 emit_store_conditional (DImode, scratch, mem, scratch);
4595
4596 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4597 emit_unlikely_jump (x, label1);
4598
4599 emit_insn (gen_memory_barrier ());
4600 emit_label (XEXP (label2, 0));
4601 }
4602
4603 /* Expand an atomic exchange operation. */
4604
4605 void
4606 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4607 {
4608 enum machine_mode mode = GET_MODE (mem);
4609 rtx label, x, cond = gen_lowpart (DImode, scratch);
4610
4611 emit_insn (gen_memory_barrier ());
4612
4613 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4614 emit_label (XEXP (label, 0));
4615
4616 emit_load_locked (mode, retval, mem);
4617 emit_move_insn (scratch, val);
4618 emit_store_conditional (mode, cond, mem, scratch);
4619
4620 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4621 emit_unlikely_jump (x, label);
4622 }
4623
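/* Expand an atomic exchange of a QImode or HImode value by operating
   on the aligned DImode word that contains it.  */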
4624 void
4625 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4626 {
4627 enum machine_mode mode = GET_MODE (mem);
4628 rtx addr, align, wdst;
4629 rtx (*fn4) (rtx, rtx, rtx, rtx);
4630
4631 /* Force the address into a register. */
4632 addr = force_reg (DImode, XEXP (mem, 0));
4633
4634 /* Align it to a multiple of 8. */
4635 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4636 NULL_RTX, 1, OPTAB_DIRECT);
4637
4638 /* Insert val into the correct byte location within the word. */
4639 val = emit_insxl (mode, val, addr);
4640
4641 wdst = gen_reg_rtx (DImode);
4642 if (mode == QImode)
4643 fn4 = gen_sync_lock_test_and_setqi_1;
4644 else
4645 fn4 = gen_sync_lock_test_and_sethi_1;
4646 emit_insn (fn4 (wdst, addr, val, align));
4647
4648 emit_move_insn (dst, gen_lowpart (mode, wdst));
4649 }
4650
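/* Split a QImode/HImode atomic exchange: load-lock the containing
   quadword, extract the old value into DEST, mask in VAL, and
   store-conditional the result, retrying until the store succeeds.  */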
4651 void
4652 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4653 rtx val, rtx align, rtx scratch)
4654 {
4655 rtx label, mem, width, mask, x;
4656
4657 mem = gen_rtx_MEM (DImode, align);
4658 MEM_VOLATILE_P (mem) = 1;
4659
4660 emit_insn (gen_memory_barrier ());
4661 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4662 emit_label (XEXP (label, 0));
4663
4664 emit_load_locked (DImode, scratch, mem);
4665
4666 width = GEN_INT (GET_MODE_BITSIZE (mode));
4667 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4668 if (WORDS_BIG_ENDIAN)
4669 {
4670 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4671 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4672 }
4673 else
4674 {
4675 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4676 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4677 }
4678 emit_insn (gen_iordi3 (scratch, scratch, val));
4679
4680 emit_store_conditional (DImode, scratch, mem, scratch);
4681
4682 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4683 emit_unlikely_jump (x, label);
4684 }
4685 \f
4686 /* Adjust the cost of a scheduling dependency. Return the new cost of
4687 the dependency of INSN on DEP_INSN, where LINK gives its type. COST is the current cost. */
4688
4689 static int
4690 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4691 {
4692 enum attr_type insn_type, dep_insn_type;
4693
4694 /* If the dependence is an anti-dependence, there is no cost. For an
4695 output dependence, there is sometimes a cost, but it doesn't seem
4696 worth handling those few cases. */
4697 if (REG_NOTE_KIND (link) != 0)
4698 return cost;
4699
4700 /* If we can't recognize the insns, we can't really do anything. */
4701 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4702 return cost;
4703
4704 insn_type = get_attr_type (insn);
4705 dep_insn_type = get_attr_type (dep_insn);
4706
4707 /* Bring in the user-defined memory latency. */
4708 if (dep_insn_type == TYPE_ILD
4709 || dep_insn_type == TYPE_FLD
4710 || dep_insn_type == TYPE_LDSYM)
4711 cost += alpha_memory_latency-1;
4712
4713 /* Everything else handled in DFA bypasses now. */
4714
4715 return cost;
4716 }
4717
4718 /* The number of instructions that can be issued per cycle. */
4719
4720 static int
4721 alpha_issue_rate (void)
4722 {
4723 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4724 }
4725
4726 /* How many alternative schedules to try. This should be as wide as the
4727 scheduling freedom in the DFA, but no wider. Making this value too
4728 large results in extra work for the scheduler.
4729
4730 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4731 alternative schedules. For EV5, we can choose between E0/E1 and
4732 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4733
4734 static int
4735 alpha_multipass_dfa_lookahead (void)
4736 {
4737 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4738 }
4739 \f
4740 /* Machine-specific function data. */
4741
4742 struct machine_function GTY(())
4743 {
4744 /* For unicosmk. */
4745 /* List of call information words for calls from this function. */
4746 struct rtx_def *first_ciw;
4747 struct rtx_def *last_ciw;
4748 int ciw_count;
4749
4750 /* List of deferred case vectors. */
4751 struct rtx_def *addr_list;
4752
4753 /* For OSF. */
4754 const char *some_ld_name;
4755
4756 /* For TARGET_LD_BUGGY_LDGP. */
4757 struct rtx_def *gp_save_rtx;
4758 };
4759
4760 /* How to allocate a 'struct machine_function'. */
4761
4762 static struct machine_function *
4763 alpha_init_machine_status (void)
4764 {
4765 return ((struct machine_function *)
4766 ggc_alloc_cleared (sizeof (struct machine_function)));
4767 }
4768
4769 /* Functions to save and restore alpha_return_addr_rtx. */
4770
4771 /* Start the ball rolling with RETURN_ADDR_RTX. */
4772
4773 rtx
4774 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4775 {
4776 if (count != 0)
4777 return const0_rtx;
4778
4779 return get_hard_reg_initial_val (Pmode, REG_RA);
4780 }
4781
4782 /* Return or create a memory slot containing the gp value for the current
4783 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4784
4785 rtx
4786 alpha_gp_save_rtx (void)
4787 {
4788 rtx seq, m = cfun->machine->gp_save_rtx;
4789
4790 if (m == NULL)
4791 {
4792 start_sequence ();
4793
4794 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4795 m = validize_mem (m);
4796 emit_move_insn (m, pic_offset_table_rtx);
4797
4798 seq = get_insns ();
4799 end_sequence ();
4800 emit_insn_at_entry (seq);
4801
4802 cfun->machine->gp_save_rtx = m;
4803 }
4804
4805 return m;
4806 }
4807
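/* Return nonzero if the return address register is written anywhere
   in the current function.  */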
4808 static int
4809 alpha_ra_ever_killed (void)
4810 {
4811 rtx top;
4812
4813 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4814 return regs_ever_live[REG_RA];
4815
4816 push_topmost_sequence ();
4817 top = get_insns ();
4818 pop_topmost_sequence ();
4819
4820 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4821 }
4822
4823 \f
4824 /* Return the trap mode suffix applicable to the current
4825 instruction, or NULL. */
4826
4827 static const char *
4828 get_trap_mode_suffix (void)
4829 {
4830 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4831
4832 switch (s)
4833 {
4834 case TRAP_SUFFIX_NONE:
4835 return NULL;
4836
4837 case TRAP_SUFFIX_SU:
4838 if (alpha_fptm >= ALPHA_FPTM_SU)
4839 return "su";
4840 return NULL;
4841
4842 case TRAP_SUFFIX_SUI:
4843 if (alpha_fptm >= ALPHA_FPTM_SUI)
4844 return "sui";
4845 return NULL;
4846
4847 case TRAP_SUFFIX_V_SV:
4848 switch (alpha_fptm)
4849 {
4850 case ALPHA_FPTM_N:
4851 return NULL;
4852 case ALPHA_FPTM_U:
4853 return "v";
4854 case ALPHA_FPTM_SU:
4855 case ALPHA_FPTM_SUI:
4856 return "sv";
4857 default:
4858 gcc_unreachable ();
4859 }
4860
4861 case TRAP_SUFFIX_V_SV_SVI:
4862 switch (alpha_fptm)
4863 {
4864 case ALPHA_FPTM_N:
4865 return NULL;
4866 case ALPHA_FPTM_U:
4867 return "v";
4868 case ALPHA_FPTM_SU:
4869 return "sv";
4870 case ALPHA_FPTM_SUI:
4871 return "svi";
4872 default:
4873 gcc_unreachable ();
4874 }
4875 break;
4876
4877 case TRAP_SUFFIX_U_SU_SUI:
4878 switch (alpha_fptm)
4879 {
4880 case ALPHA_FPTM_N:
4881 return NULL;
4882 case ALPHA_FPTM_U:
4883 return "u";
4884 case ALPHA_FPTM_SU:
4885 return "su";
4886 case ALPHA_FPTM_SUI:
4887 return "sui";
4888 default:
4889 gcc_unreachable ();
4890 }
4891 break;
4892
4893 default:
4894 gcc_unreachable ();
4895 }
4896 gcc_unreachable ();
4897 }
4898
4899 /* Return the rounding mode suffix applicable to the current
4900 instruction, or NULL. */
4901
4902 static const char *
4903 get_round_mode_suffix (void)
4904 {
4905 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4906
4907 switch (s)
4908 {
4909 case ROUND_SUFFIX_NONE:
4910 return NULL;
4911 case ROUND_SUFFIX_NORMAL:
4912 switch (alpha_fprm)
4913 {
4914 case ALPHA_FPRM_NORM:
4915 return NULL;
4916 case ALPHA_FPRM_MINF:
4917 return "m";
4918 case ALPHA_FPRM_CHOP:
4919 return "c";
4920 case ALPHA_FPRM_DYN:
4921 return "d";
4922 default:
4923 gcc_unreachable ();
4924 }
4925 break;
4926
4927 case ROUND_SUFFIX_C:
4928 return "c";
4929
4930 default:
4931 gcc_unreachable ();
4932 }
4933 gcc_unreachable ();
4934 }
4935
4936 /* Locate some local-dynamic symbol still in use by this function
4937 so that we can print its name in some movdi_er_tlsldm pattern. */
4938
4939 static int
4940 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4941 {
4942 rtx x = *px;
4943
4944 if (GET_CODE (x) == SYMBOL_REF
4945 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4946 {
4947 cfun->machine->some_ld_name = XSTR (x, 0);
4948 return 1;
4949 }
4950
4951 return 0;
4952 }
4953
4954 static const char *
4955 get_some_local_dynamic_name (void)
4956 {
4957 rtx insn;
4958
4959 if (cfun->machine->some_ld_name)
4960 return cfun->machine->some_ld_name;
4961
4962 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4963 if (INSN_P (insn)
4964 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4965 return cfun->machine->some_ld_name;
4966
4967 gcc_unreachable ();
4968 }
4969
4970 /* Print an operand. Recognize special options, documented below. */
4971
4972 void
4973 print_operand (FILE *file, rtx x, int code)
4974 {
4975 int i;
4976
4977 switch (code)
4978 {
4979 case '~':
4980 /* Print the assembler name of the current function. */
4981 assemble_name (file, alpha_fnname);
4982 break;
4983
4984 case '&':
4985 assemble_name (file, get_some_local_dynamic_name ());
4986 break;
4987
4988 case '/':
4989 {
4990 const char *trap = get_trap_mode_suffix ();
4991 const char *round = get_round_mode_suffix ();
4992
4993 if (trap || round)
4994 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4995 (trap ? trap : ""), (round ? round : ""));
4996 break;
4997 }
4998
4999 case ',':
5000 /* Generates single precision instruction suffix. */
5001 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5002 break;
5003
5004 case '-':
5005 /* Generates double precision instruction suffix. */
5006 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5007 break;
5008
5009 case '#':
5010 if (alpha_this_literal_sequence_number == 0)
5011 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5012 fprintf (file, "%d", alpha_this_literal_sequence_number);
5013 break;
5014
5015 case '*':
5016 if (alpha_this_gpdisp_sequence_number == 0)
5017 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5018 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5019 break;
5020
5021 case 'H':
5022 if (GET_CODE (x) == HIGH)
5023 output_addr_const (file, XEXP (x, 0));
5024 else
5025 output_operand_lossage ("invalid %%H value");
5026 break;
5027
5028 case 'J':
5029 {
5030 const char *lituse;
5031
5032 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5033 {
5034 x = XVECEXP (x, 0, 0);
5035 lituse = "lituse_tlsgd";
5036 }
5037 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5038 {
5039 x = XVECEXP (x, 0, 0);
5040 lituse = "lituse_tlsldm";
5041 }
5042 else if (GET_CODE (x) == CONST_INT)
5043 lituse = "lituse_jsr";
5044 else
5045 {
5046 output_operand_lossage ("invalid %%J value");
5047 break;
5048 }
5049
5050 if (x != const0_rtx)
5051 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5052 }
5053 break;
5054
5055 case 'j':
5056 {
5057 const char *lituse;
5058
5059 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5060 lituse = "lituse_jsrdirect";
5061 #else
5062 lituse = "lituse_jsr";
5063 #endif
5064
5065 gcc_assert (INTVAL (x) != 0);
5066 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5067 }
5068 break;
5069 case 'r':
5070 /* If this operand is the constant zero, write it as "$31". */
5071 if (GET_CODE (x) == REG)
5072 fprintf (file, "%s", reg_names[REGNO (x)]);
5073 else if (x == CONST0_RTX (GET_MODE (x)))
5074 fprintf (file, "$31");
5075 else
5076 output_operand_lossage ("invalid %%r value");
5077 break;
5078
5079 case 'R':
5080 /* Similar, but for floating-point. */
5081 if (GET_CODE (x) == REG)
5082 fprintf (file, "%s", reg_names[REGNO (x)]);
5083 else if (x == CONST0_RTX (GET_MODE (x)))
5084 fprintf (file, "$f31");
5085 else
5086 output_operand_lossage ("invalid %%R value");
5087 break;
5088
5089 case 'N':
5090 /* Write the 1's complement of a constant. */
5091 if (GET_CODE (x) != CONST_INT)
5092 output_operand_lossage ("invalid %%N value");
5093
5094 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5095 break;
5096
5097 case 'P':
5098 /* Write 1 << C, for a constant C. */
5099 if (GET_CODE (x) != CONST_INT)
5100 output_operand_lossage ("invalid %%P value");
5101
5102 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5103 break;
5104
5105 case 'h':
5106 /* Write the high-order 16 bits of a constant, sign-extended. */
5107 if (GET_CODE (x) != CONST_INT)
5108 output_operand_lossage ("invalid %%h value");
5109
5110 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5111 break;
5112
5113 case 'L':
5114 /* Write the low-order 16 bits of a constant, sign-extended. */
5115 if (GET_CODE (x) != CONST_INT)
5116 output_operand_lossage ("invalid %%L value");
5117
5118 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5119 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5120 break;
5121
5122 case 'm':
5123 /* Write mask for ZAP insn. */
5124 if (GET_CODE (x) == CONST_DOUBLE)
5125 {
5126 HOST_WIDE_INT mask = 0;
5127 HOST_WIDE_INT value;
5128
5129 value = CONST_DOUBLE_LOW (x);
5130 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5131 i++, value >>= 8)
5132 if (value & 0xff)
5133 mask |= (1 << i);
5134
5135 value = CONST_DOUBLE_HIGH (x);
5136 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5137 i++, value >>= 8)
5138 if (value & 0xff)
5139 mask |= (1 << (i + sizeof (int)));
5140
5141 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5142 }
5143
5144 else if (GET_CODE (x) == CONST_INT)
5145 {
5146 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5147
5148 for (i = 0; i < 8; i++, value >>= 8)
5149 if (value & 0xff)
5150 mask |= (1 << i);
5151
5152 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5153 }
5154 else
5155 output_operand_lossage ("invalid %%m value");
5156 break;
5157
5158 case 'M':
5159 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5160 if (GET_CODE (x) != CONST_INT
5161 || (INTVAL (x) != 8 && INTVAL (x) != 16
5162 && INTVAL (x) != 32 && INTVAL (x) != 64))
5163 output_operand_lossage ("invalid %%M value");
5164
5165 fprintf (file, "%s",
5166 (INTVAL (x) == 8 ? "b"
5167 : INTVAL (x) == 16 ? "w"
5168 : INTVAL (x) == 32 ? "l"
5169 : "q"));
5170 break;
5171
5172 case 'U':
5173 /* Similar, except do it from the mask. */
5174 if (GET_CODE (x) == CONST_INT)
5175 {
5176 HOST_WIDE_INT value = INTVAL (x);
5177
5178 if (value == 0xff)
5179 {
5180 fputc ('b', file);
5181 break;
5182 }
5183 if (value == 0xffff)
5184 {
5185 fputc ('w', file);
5186 break;
5187 }
5188 if (value == 0xffffffff)
5189 {
5190 fputc ('l', file);
5191 break;
5192 }
5193 if (value == -1)
5194 {
5195 fputc ('q', file);
5196 break;
5197 }
5198 }
5199 else if (HOST_BITS_PER_WIDE_INT == 32
5200 && GET_CODE (x) == CONST_DOUBLE
5201 && CONST_DOUBLE_LOW (x) == 0xffffffff
5202 && CONST_DOUBLE_HIGH (x) == 0)
5203 {
5204 fputc ('l', file);
5205 break;
5206 }
5207 output_operand_lossage ("invalid %%U value");
5208 break;
5209
5210 case 's':
5211 /* Write the constant value divided by 8 for little-endian mode or
5212 (56 - value) / 8 for big-endian mode. */
5213
5214 if (GET_CODE (x) != CONST_INT
5215 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5216 ? 56
5217 : 64)
5218 || (INTVAL (x) & 7) != 0)
5219 output_operand_lossage ("invalid %%s value");
5220
5221 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5222 WORDS_BIG_ENDIAN
5223 ? (56 - INTVAL (x)) / 8
5224 : INTVAL (x) / 8);
5225 break;
5226
5227 case 'S':
5228 /* Same, except compute (64 - c) / 8. */
5229
5230 if (GET_CODE (x) != CONST_INT
5231 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5232 || (INTVAL (x) & 7) != 0)
5233 output_operand_lossage ("invalid %%S value");
5234
5235 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5236 break;
5237
5238 case 't':
5239 {
5240 /* On Unicos/Mk systems: use a DEX expression if the symbol
5241 clashes with a register name. */
5242 int dex = unicosmk_need_dex (x);
5243 if (dex)
5244 fprintf (file, "DEX(%d)", dex);
5245 else
5246 output_addr_const (file, x);
5247 }
5248 break;
5249
5250 case 'C': case 'D': case 'c': case 'd':
5251 /* Write out comparison name. */
5252 {
5253 enum rtx_code c = GET_CODE (x);
5254
5255 if (!COMPARISON_P (x))
5256 output_operand_lossage ("invalid %%C value");
5257
5258 else if (code == 'D')
5259 c = reverse_condition (c);
5260 else if (code == 'c')
5261 c = swap_condition (c);
5262 else if (code == 'd')
5263 c = swap_condition (reverse_condition (c));
5264
5265 if (c == LEU)
5266 fprintf (file, "ule");
5267 else if (c == LTU)
5268 fprintf (file, "ult");
5269 else if (c == UNORDERED)
5270 fprintf (file, "un");
5271 else
5272 fprintf (file, "%s", GET_RTX_NAME (c));
5273 }
5274 break;
5275
5276 case 'E':
5277 /* Write the divide or modulus operator. */
5278 switch (GET_CODE (x))
5279 {
5280 case DIV:
5281 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5282 break;
5283 case UDIV:
5284 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5285 break;
5286 case MOD:
5287 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5288 break;
5289 case UMOD:
5290 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5291 break;
5292 default:
5293 output_operand_lossage ("invalid %%E value");
5294 break;
5295 }
5296 break;
5297
5298 case 'A':
5299 /* Write "_u" for unaligned access. */
5300 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5301 fprintf (file, "_u");
5302 break;
5303
5304 case 0:
5305 if (GET_CODE (x) == REG)
5306 fprintf (file, "%s", reg_names[REGNO (x)]);
5307 else if (GET_CODE (x) == MEM)
5308 output_address (XEXP (x, 0));
5309 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5310 {
5311 switch (XINT (XEXP (x, 0), 1))
5312 {
5313 case UNSPEC_DTPREL:
5314 case UNSPEC_TPREL:
5315 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5316 break;
5317 default:
5318 output_operand_lossage ("unknown relocation unspec");
5319 break;
5320 }
5321 }
5322 else
5323 output_addr_const (file, x);
5324 break;
5325
5326 default:
5327 output_operand_lossage ("invalid %%xn code");
5328 }
5329 }
5330
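/* Output the memory address ADDR to FILE in assembler syntax, handling
   LO_SUM relocations and the AND used for unaligned accesses.  */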
5331 void
5332 print_operand_address (FILE *file, rtx addr)
5333 {
5334 int basereg = 31;
5335 HOST_WIDE_INT offset = 0;
5336
5337 if (GET_CODE (addr) == AND)
5338 addr = XEXP (addr, 0);
5339
5340 if (GET_CODE (addr) == PLUS
5341 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5342 {
5343 offset = INTVAL (XEXP (addr, 1));
5344 addr = XEXP (addr, 0);
5345 }
5346
5347 if (GET_CODE (addr) == LO_SUM)
5348 {
5349 const char *reloc16, *reloclo;
5350 rtx op1 = XEXP (addr, 1);
5351
5352 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5353 {
5354 op1 = XEXP (op1, 0);
5355 switch (XINT (op1, 1))
5356 {
5357 case UNSPEC_DTPREL:
5358 reloc16 = NULL;
5359 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5360 break;
5361 case UNSPEC_TPREL:
5362 reloc16 = NULL;
5363 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5364 break;
5365 default:
5366 output_operand_lossage ("unknown relocation unspec");
5367 return;
5368 }
5369
5370 output_addr_const (file, XVECEXP (op1, 0, 0));
5371 }
5372 else
5373 {
5374 reloc16 = "gprel";
5375 reloclo = "gprellow";
5376 output_addr_const (file, op1);
5377 }
5378
5379 if (offset)
5380 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5381
5382 addr = XEXP (addr, 0);
5383 switch (GET_CODE (addr))
5384 {
5385 case REG:
5386 basereg = REGNO (addr);
5387 break;
5388
5389 case SUBREG:
5390 basereg = subreg_regno (addr);
5391 break;
5392
5393 default:
5394 gcc_unreachable ();
5395 }
5396
5397 fprintf (file, "($%d)\t\t!%s", basereg,
5398 (basereg == 29 ? reloc16 : reloclo));
5399 return;
5400 }
5401
5402 switch (GET_CODE (addr))
5403 {
5404 case REG:
5405 basereg = REGNO (addr);
5406 break;
5407
5408 case SUBREG:
5409 basereg = subreg_regno (addr);
5410 break;
5411
5412 case CONST_INT:
5413 offset = INTVAL (addr);
5414 break;
5415
5416 #if TARGET_ABI_OPEN_VMS
5417 case SYMBOL_REF:
5418 fprintf (file, "%s", XSTR (addr, 0));
5419 return;
5420
5421 case CONST:
5422 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5423 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5424 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5425 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5426 INTVAL (XEXP (XEXP (addr, 0), 1)));
5427 return;
5428
5429 #endif
5430 default:
5431 gcc_unreachable ();
5432 }
5433
5434 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5435 }
5436 \f
5437 /* Emit RTL insns to initialize the variable parts of a trampoline at
5438 TRAMP. FNADDR is an RTX for the address of the function's pure
5439 code. CXT is an RTX for the static chain value for the function.
5440
5441 The three offset parameters are for the individual template's
5442 layout. A JMPOFS < 0 indicates that the trampoline does not
5443 contain instructions at all.
5444
5445 We assume here that a function will be called many more times than
5446 its address is taken (e.g., it might be passed to qsort), so we
5447 take the trouble to initialize the "hint" field in the JMP insn.
5448 Note that the hint field is PC (new) + 4 * bits 13:0. */
5449
5450 void
5451 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5452 int fnofs, int cxtofs, int jmpofs)
5453 {
5454 rtx temp, temp1, addr;
5455 /* VMS really uses DImode pointers in memory at this point. */
5456 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5457
5458 #ifdef POINTERS_EXTEND_UNSIGNED
5459 fnaddr = convert_memory_address (mode, fnaddr);
5460 cxt = convert_memory_address (mode, cxt);
5461 #endif
5462
5463 /* Store function address and CXT. */
5464 addr = memory_address (mode, plus_constant (tramp, fnofs));
5465 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5466 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5467 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5468
5469 /* This has been disabled since the hint only has a 32k range, and in
5470 no existing OS is the stack within 32k of the text segment. */
5471 if (0 && jmpofs >= 0)
5472 {
5473 /* Compute hint value. */
5474 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5475 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5476 OPTAB_WIDEN);
5477 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5478 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5479 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5480 GEN_INT (0x3fff), 0);
5481
5482 /* Merge in the hint. */
5483 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5484 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5485 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5486 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5487 OPTAB_WIDEN);
5488 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5489 }
5490
5491 #ifdef ENABLE_EXECUTE_STACK
5492 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5493 0, VOIDmode, 1, tramp, Pmode);
5494 #endif
5495
5496 if (jmpofs >= 0)
5497 emit_insn (gen_imb ());
5498 }
5499 \f
5500 /* Determine where to put an argument to a function.
5501 Value is zero to push the argument on the stack,
5502 or a hard register in which to store the argument.
5503
5504 MODE is the argument's machine mode.
5505 TYPE is the data type of the argument (as a tree).
5506 This is null for libcalls where that information may
5507 not be available.
5508 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5509 the preceding args and about the function being called.
5510 NAMED is nonzero if this argument is a named parameter
5511 (otherwise it is an extra parameter matching an ellipsis).
5512
5513 On Alpha the first 6 words of args are normally in registers
5514 and the rest are pushed. */
5515
5516 rtx
5517 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5518 int named ATTRIBUTE_UNUSED)
5519 {
5520 int basereg;
5521 int num_args;
5522
5523 /* Don't get confused and pass small structures in FP registers. */
5524 if (type && AGGREGATE_TYPE_P (type))
5525 basereg = 16;
5526 else
5527 {
5528 #ifdef ENABLE_CHECKING
5529 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5530 values here. */
5531 gcc_assert (!COMPLEX_MODE_P (mode));
5532 #endif
5533
5534 /* Set up defaults for FP operands passed in FP registers, and
5535 integral operands passed in integer registers. */
5536 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5537 basereg = 32 + 16;
5538 else
5539 basereg = 16;
5540 }
5541
5542 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5543 the three platforms, so we can't avoid conditional compilation. */
5544 #if TARGET_ABI_OPEN_VMS
5545 {
5546 if (mode == VOIDmode)
5547 return alpha_arg_info_reg_val (cum);
5548
5549 num_args = cum.num_args;
5550 if (num_args >= 6
5551 || targetm.calls.must_pass_in_stack (mode, type))
5552 return NULL_RTX;
5553 }
5554 #elif TARGET_ABI_UNICOSMK
5555 {
5556 int size;
5557
5558 /* If this is the last argument, generate the call info word (CIW). */
5559 /* ??? We don't include the caller's line number in the CIW because
5560 I don't know how to determine it if debug info is turned off. */
5561 if (mode == VOIDmode)
5562 {
5563 int i;
5564 HOST_WIDE_INT lo;
5565 HOST_WIDE_INT hi;
5566 rtx ciw;
5567
5568 lo = 0;
5569
5570 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5571 if (cum.reg_args_type[i])
5572 lo |= (1 << (7 - i));
5573
5574 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5575 lo |= 7;
5576 else
5577 lo |= cum.num_reg_words;
5578
5579 #if HOST_BITS_PER_WIDE_INT == 32
5580 hi = (cum.num_args << 20) | cum.num_arg_words;
5581 #else
5582 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5583 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5584 hi = 0;
5585 #endif
5586 ciw = immed_double_const (lo, hi, DImode);
5587
5588 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5589 UNSPEC_UMK_LOAD_CIW);
5590 }
5591
5592 size = ALPHA_ARG_SIZE (mode, type, named);
5593 num_args = cum.num_reg_words;
5594 if (cum.force_stack
5595 || cum.num_reg_words + size > 6
5596 || targetm.calls.must_pass_in_stack (mode, type))
5597 return NULL_RTX;
5598 else if (type && TYPE_MODE (type) == BLKmode)
5599 {
5600 rtx reg1, reg2;
5601
5602 reg1 = gen_rtx_REG (DImode, num_args + 16);
5603 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5604
5605 /* The argument fits in two registers. Note that we still need to
5606 reserve a register for empty structures. */
5607 if (size == 0)
5608 return NULL_RTX;
5609 else if (size == 1)
5610 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5611 else
5612 {
5613 reg2 = gen_rtx_REG (DImode, num_args + 17);
5614 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5615 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5616 }
5617 }
5618 }
5619 #elif TARGET_ABI_OSF
5620 {
5621 if (cum >= 6)
5622 return NULL_RTX;
5623 num_args = cum;
5624
5625 /* VOID is passed as a special flag for "last argument". */
5626 if (type == void_type_node)
5627 basereg = 16;
5628 else if (targetm.calls.must_pass_in_stack (mode, type))
5629 return NULL_RTX;
5630 }
5631 #else
5632 #error Unhandled ABI
5633 #endif
5634
5635 return gen_rtx_REG (mode, num_args + basereg);
5636 }
5637
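/* Return the number of bytes of an argument that are passed in
   registers when the argument straddles the boundary between the
   argument registers and the stack.  */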
5638 static int
5639 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5640 enum machine_mode mode ATTRIBUTE_UNUSED,
5641 tree type ATTRIBUTE_UNUSED,
5642 bool named ATTRIBUTE_UNUSED)
5643 {
5644 int words = 0;
5645
5646 #if TARGET_ABI_OPEN_VMS
5647 if (cum->num_args < 6
5648 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5649 words = 6 - cum->num_args;
5650 #elif TARGET_ABI_UNICOSMK
5651 /* Never any split arguments. */
5652 #elif TARGET_ABI_OSF
5653 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5654 words = 6 - *cum;
5655 #else
5656 #error Unhandled ABI
5657 #endif
5658
5659 return words * UNITS_PER_WORD;
5660 }
5661
5662
5663 /* Return true if TYPE must be returned in memory, instead of in registers. */
5664
5665 static bool
5666 alpha_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
5667 {
5668 enum machine_mode mode = VOIDmode;
5669 int size;
5670
5671 if (type)
5672 {
5673 mode = TYPE_MODE (type);
5674
5675 /* All aggregates are returned in memory. */
5676 if (AGGREGATE_TYPE_P (type))
5677 return true;
5678 }
5679
5680 size = GET_MODE_SIZE (mode);
5681 switch (GET_MODE_CLASS (mode))
5682 {
5683 case MODE_VECTOR_FLOAT:
5684 /* Pass all float vectors in memory, like an aggregate. */
5685 return true;
5686
5687 case MODE_COMPLEX_FLOAT:
5688 /* We judge complex floats on the size of their element,
5689 not the size of the whole type. */
5690 size = GET_MODE_UNIT_SIZE (mode);
5691 break;
5692
5693 case MODE_INT:
5694 case MODE_FLOAT:
5695 case MODE_COMPLEX_INT:
5696 case MODE_VECTOR_INT:
5697 break;
5698
5699 default:
5700 /* ??? We get called on all sorts of random stuff from
5701 aggregate_value_p. We must return something, but it's not
5702 clear what's safe to return. Pretend it's a struct I
5703 guess. */
5704 return true;
5705 }
5706
5707 /* Otherwise types must fit in one register. */
5708 return size > UNITS_PER_WORD;
5709 }
5710
5711 /* Return true if TYPE should be passed by invisible reference. */
5712
5713 static bool
5714 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5715 enum machine_mode mode,
5716 tree type ATTRIBUTE_UNUSED,
5717 bool named ATTRIBUTE_UNUSED)
5718 {
5719 return mode == TFmode || mode == TCmode;
5720 }
5721
5722 /* Define how to find the value returned by a function. VALTYPE is the
5723 data type of the value (as a tree). If the precise function being
5724 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5725 MODE is set instead of VALTYPE for libcalls.
5726
5727 On Alpha the value is found in $0 for integer functions and
5728 $f0 for floating-point functions. */
5729
5730 rtx
5731 function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
5732 enum machine_mode mode)
5733 {
5734 unsigned int regnum, dummy;
5735 enum mode_class class;
5736
5737 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5738
5739 if (valtype)
5740 mode = TYPE_MODE (valtype);
5741
5742 class = GET_MODE_CLASS (mode);
5743 switch (class)
5744 {
5745 case MODE_INT:
5746 PROMOTE_MODE (mode, dummy, valtype);
5747 /* FALLTHRU */
5748
5749 case MODE_COMPLEX_INT:
5750 case MODE_VECTOR_INT:
5751 regnum = 0;
5752 break;
5753
5754 case MODE_FLOAT:
5755 regnum = 32;
5756 break;
5757
5758 case MODE_COMPLEX_FLOAT:
5759 {
5760 enum machine_mode cmode = GET_MODE_INNER (mode);
5761
5762 return gen_rtx_PARALLEL
5763 (VOIDmode,
5764 gen_rtvec (2,
5765 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5766 const0_rtx),
5767 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5768 GEN_INT (GET_MODE_SIZE (cmode)))));
5769 }
5770
5771 default:
5772 gcc_unreachable ();
5773 }
5774
5775 return gen_rtx_REG (mode, regnum);
5776 }
5777
5778 /* TCmode complex values are passed by invisible reference. We
5779 should not split these values. */
5780
5781 static bool
5782 alpha_split_complex_arg (tree type)
5783 {
5784 return TYPE_MODE (type) != TCmode;
5785 }
5786
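/* Build the va_list type.  On OSF/1 this is a record containing a
   __base pointer and an __offset; VMS and Unicos/Mk use a plain
   pointer.  */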
5787 static tree
5788 alpha_build_builtin_va_list (void)
5789 {
5790 tree base, ofs, space, record, type_decl;
5791
5792 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5793 return ptr_type_node;
5794
5795 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5796 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5797 TREE_CHAIN (record) = type_decl;
5798 TYPE_NAME (record) = type_decl;
5799
5800 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5801
5802 /* Dummy field to prevent alignment warnings. */
5803 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5804 DECL_FIELD_CONTEXT (space) = record;
5805 DECL_ARTIFICIAL (space) = 1;
5806 DECL_IGNORED_P (space) = 1;
5807
5808 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5809 integer_type_node);
5810 DECL_FIELD_CONTEXT (ofs) = record;
5811 TREE_CHAIN (ofs) = space;
5812
5813 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5814 ptr_type_node);
5815 DECL_FIELD_CONTEXT (base) = record;
5816 TREE_CHAIN (base) = ofs;
5817
5818 TYPE_FIELDS (record) = base;
5819 layout_type (record);
5820
5821 va_list_gpr_counter_field = ofs;
5822 return record;
5823 }
5824
5825 #if TARGET_ABI_OSF
5826 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5827 and constant additions. */
5828
5829 static tree
5830 va_list_skip_additions (tree lhs)
5831 {
5832 tree rhs, stmt;
5833
5834 if (TREE_CODE (lhs) != SSA_NAME)
5835 return lhs;
5836
5837 for (;;)
5838 {
5839 stmt = SSA_NAME_DEF_STMT (lhs);
5840
5841 if (TREE_CODE (stmt) == PHI_NODE)
5842 return stmt;
5843
5844 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5845 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5846 return lhs;
5847
5848 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5849 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5850 rhs = TREE_OPERAND (rhs, 0);
5851
5852 if ((TREE_CODE (rhs) != NOP_EXPR
5853 && TREE_CODE (rhs) != CONVERT_EXPR
5854 && (TREE_CODE (rhs) != PLUS_EXPR
5855 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5856 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5857 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5858 return rhs;
5859
5860 lhs = TREE_OPERAND (rhs, 0);
5861 }
5862 }
5863
5864 /* Check if LHS = RHS statement is
5865 LHS = *(ap.__base + ap.__offset + cst)
5866 or
5867 LHS = *(ap.__base
5868 + ((ap.__offset + cst <= 47)
5869 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5870 If the former, indicate that GPR registers are needed,
5871 if the latter, indicate that FPR registers are needed.
5872
5873 Also look for LHS = (*ptr).field, where ptr is one of the forms
5874 listed above.
5875
5876 On alpha, cfun->va_list_gpr_size is used as size of the needed
5877 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5878 registers are needed and bit 1 set if FPR registers are needed.
5879 Return true if va_list references should not be scanned for the
5880 current statement. */
5881
5882 static bool
5883 alpha_stdarg_optimize_hook (struct stdarg_info *si, tree lhs, tree rhs)
5884 {
5885 tree base, offset, arg1, arg2;
5886 int offset_arg = 1;
5887
5888 while (handled_component_p (rhs))
5889 rhs = TREE_OPERAND (rhs, 0);
5890 if (TREE_CODE (rhs) != INDIRECT_REF
5891 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5892 return false;
5893
5894 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5895 if (lhs == NULL_TREE
5896 || TREE_CODE (lhs) != PLUS_EXPR)
5897 return false;
5898
5899 base = TREE_OPERAND (lhs, 0);
5900 if (TREE_CODE (base) == SSA_NAME)
5901 base = va_list_skip_additions (base);
5902
5903 if (TREE_CODE (base) != COMPONENT_REF
5904 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5905 {
5906 base = TREE_OPERAND (lhs, 0);
5907 if (TREE_CODE (base) == SSA_NAME)
5908 base = va_list_skip_additions (base);
5909
5910 if (TREE_CODE (base) != COMPONENT_REF
5911 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5912 return false;
5913
5914 offset_arg = 0;
5915 }
5916
5917 base = get_base_address (base);
5918 if (TREE_CODE (base) != VAR_DECL
5919 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5920 return false;
5921
5922 offset = TREE_OPERAND (lhs, offset_arg);
5923 if (TREE_CODE (offset) == SSA_NAME)
5924 offset = va_list_skip_additions (offset);
5925
5926 if (TREE_CODE (offset) == PHI_NODE)
5927 {
5928 HOST_WIDE_INT sub;
5929
5930 if (PHI_NUM_ARGS (offset) != 2)
5931 goto escapes;
5932
5933 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5934 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5935 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5936 {
5937 tree tem = arg1;
5938 arg1 = arg2;
5939 arg2 = tem;
5940
5941 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5942 goto escapes;
5943 }
5944 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5945 goto escapes;
5946
5947 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5948 if (TREE_CODE (arg2) == MINUS_EXPR)
5949 sub = -sub;
5950 if (sub < -48 || sub > -32)
5951 goto escapes;
5952
5953 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5954 if (arg1 != arg2)
5955 goto escapes;
5956
5957 if (TREE_CODE (arg1) == SSA_NAME)
5958 arg1 = va_list_skip_additions (arg1);
5959
5960 if (TREE_CODE (arg1) != COMPONENT_REF
5961 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5962 || get_base_address (arg1) != base)
5963 goto escapes;
5964
5965 /* Need floating point regs. */
5966 cfun->va_list_fpr_size |= 2;
5967 }
5968 else if (TREE_CODE (offset) != COMPONENT_REF
5969 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5970 || get_base_address (offset) != base)
5971 goto escapes;
5972 else
5973 /* Need general regs. */
5974 cfun->va_list_fpr_size |= 1;
5975 return false;
5976
5977 escapes:
5978 si->va_list_escapes = true;
5979 return false;
5980 }
5981 #endif
5982
5983 /* Perform any actions needed for a function that is receiving a
5984 variable number of arguments. */
5985
5986 static void
5987 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5988 tree type, int *pretend_size, int no_rtl)
5989 {
5990 CUMULATIVE_ARGS cum = *pcum;
5991
5992 /* Skip the current argument. */
5993 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5994
5995 #if TARGET_ABI_UNICOSMK
5996 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5997 arguments on the stack. Unfortunately, it doesn't always store the first
5998 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5999 with stdargs as we always have at least one named argument there. */
6000 if (cum.num_reg_words < 6)
6001 {
6002 if (!no_rtl)
6003 {
6004 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6005 emit_insn (gen_arg_home_umk ());
6006 }
6007 *pretend_size = 0;
6008 }
6009 #elif TARGET_ABI_OPEN_VMS
6010 /* For VMS, we allocate space for all 6 arg registers plus a count.
6011
6012 However, if NO registers need to be saved, don't allocate any space.
6013 This is not only because we won't need the space, but because AP
6014 includes the current_pretend_args_size and we don't want to mess up
6015 any ap-relative addresses already made. */
6016 if (cum.num_args < 6)
6017 {
6018 if (!no_rtl)
6019 {
6020 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6021 emit_insn (gen_arg_home ());
6022 }
6023 *pretend_size = 7 * UNITS_PER_WORD;
6024 }
6025 #else
6026 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6027 only push those that are remaining. However, if NO registers need to
6028 be saved, don't allocate any space. This is not only because we won't
6029 need the space, but because AP includes the current_pretend_args_size
6030 and we don't want to mess up any ap-relative addresses already made.
6031
6032 If we are not to use the floating-point registers, save the integer
6033 registers where we would put the floating-point registers. This is
6034 not the most efficient way to implement varargs with just one register
6035 class, but it isn't worth doing anything more efficient in this rare
6036 case. */
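/* Concretely, for OSF/1: the remaining FP argument registers are
   dumped starting CUM*8 bytes into the incoming argument area and the
   remaining integer argument registers 48 bytes above that, which is
   the layout alpha_va_start and alpha_gimplify_va_arg_1 rely on.  */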
6037 if (cum >= 6)
6038 return;
6039
6040 if (!no_rtl)
6041 {
6042 int count, set = get_varargs_alias_set ();
6043 rtx tmp;
6044
6045 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6046 if (count > 6 - cum)
6047 count = 6 - cum;
6048
6049 /* Detect whether integer registers or floating-point registers
6050 are needed by the detected va_arg statements. See above for
6051 how these values are computed. Note that the "escape" value
6052 is VA_LIST_MAX_FPR_SIZE, which is 255 and so has both of
6053 these bits set. */
6054 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6055
6056 if (cfun->va_list_fpr_size & 1)
6057 {
6058 tmp = gen_rtx_MEM (BLKmode,
6059 plus_constant (virtual_incoming_args_rtx,
6060 (cum + 6) * UNITS_PER_WORD));
6061 MEM_NOTRAP_P (tmp) = 1;
6062 set_mem_alias_set (tmp, set);
6063 move_block_from_reg (16 + cum, tmp, count);
6064 }
6065
6066 if (cfun->va_list_fpr_size & 2)
6067 {
6068 tmp = gen_rtx_MEM (BLKmode,
6069 plus_constant (virtual_incoming_args_rtx,
6070 cum * UNITS_PER_WORD));
6071 MEM_NOTRAP_P (tmp) = 1;
6072 set_mem_alias_set (tmp, set);
6073 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6074 }
6075 }
6076 *pretend_size = 12 * UNITS_PER_WORD;
6077 #endif
6078 }
6079
6080 void
6081 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6082 {
6083 HOST_WIDE_INT offset;
6084 tree t, offset_field, base_field;
6085
6086 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6087 return;
6088
6089 if (TARGET_ABI_UNICOSMK)
6090 std_expand_builtin_va_start (valist, nextarg);
6091
6092 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6093 up by 48, storing fp arg registers in the first 48 bytes, and the
6094 integer arg registers in the next 48 bytes. This is only done,
6095 however, if any integer registers need to be stored.
6096
6097 If no integer registers need be stored, then we must subtract 48
6098 in order to account for the integer arg registers which are counted
6099 in argsize above, but which are not actually stored on the stack.
6100 Must further be careful here about structures straddling the last
6101 integer argument register; that futzes with pretend_args_size,
6102 which changes the meaning of AP. */
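/* As a worked example (OSF/1, two named integer arguments): NUM_ARGS
   is 2, so the code below sets
       ap.__base   = incoming argument pointer + 48
       ap.__offset = 2 * 8 = 16
   i.e. the next va_arg of integer class reads 16 bytes into the
   saved-GPR block.  */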
6103
6104 if (NUM_ARGS < 6)
6105 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6106 else
6107 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6108
6109 if (TARGET_ABI_OPEN_VMS)
6110 {
6111 nextarg = plus_constant (nextarg, offset);
6112 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6113 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6114 make_tree (ptr_type_node, nextarg));
6115 TREE_SIDE_EFFECTS (t) = 1;
6116
6117 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6118 }
6119 else
6120 {
6121 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6122 offset_field = TREE_CHAIN (base_field);
6123
6124 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6125 valist, base_field, NULL_TREE);
6126 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6127 valist, offset_field, NULL_TREE);
6128
6129 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6130 t = build2 (PLUS_EXPR, ptr_type_node, t,
6131 build_int_cst (NULL_TREE, offset));
6132 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6133 TREE_SIDE_EFFECTS (t) = 1;
6134 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6135
6136 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6137 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6138 offset_field, t);
6139 TREE_SIDE_EFFECTS (t) = 1;
6140 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6141 }
6142 }
6143
6144 static tree
6145 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6146 {
6147 tree type_size, ptr_type, addend, t, addr, internal_post;
6148
6149 /* If the type could not be passed in registers, skip the block
6150 reserved for the registers. */
6151 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6152 {
6153 t = build_int_cst (TREE_TYPE (offset), 6*8);
6154 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6155 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6156 gimplify_and_add (t, pre_p);
6157 }
6158
6159 addend = offset;
6160 ptr_type = build_pointer_type (type);
6161
6162 if (TREE_CODE (type) == COMPLEX_TYPE)
6163 {
6164 tree real_part, imag_part, real_temp;
6165
6166 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6167 offset, pre_p);
6168
6169 /* Copy the value into a new temporary, lest the formal temporary
6170 be reused out from under us. */
6171 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6172
6173 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6174 offset, pre_p);
6175
6176 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6177 }
6178 else if (TREE_CODE (type) == REAL_TYPE)
6179 {
6180 tree fpaddend, cond, fourtyeight;
6181
6182 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6183 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6184 addend, fourtyeight);
6185 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6186 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6187 fpaddend, addend);
6188 }
6189
6190 /* Build the final address and force that value into a temporary. */
6191 addr = build2 (PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6192 fold_convert (ptr_type, addend));
6193 internal_post = NULL;
6194 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6195 append_to_statement_list (internal_post, pre_p);
6196
6197 /* Update the offset field. */
6198 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6199 if (type_size == NULL || TREE_OVERFLOW (type_size))
6200 t = size_zero_node;
6201 else
6202 {
6203 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6204 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6205 t = size_binop (MULT_EXPR, t, size_int (8));
6206 }
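  /* For example, a 12-byte argument advances the offset by
     ((12 + 7) / 8) * 8 = 16 bytes, keeping it 8-byte aligned.  */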
6207 t = fold_convert (TREE_TYPE (offset), t);
6208 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6209 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6210 gimplify_and_add (t, pre_p);
6211
6212 return build_va_arg_indirect_ref (addr);
6213 }
6214
6215 static tree
6216 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6217 {
6218 tree offset_field, base_field, offset, base, t, r;
6219 bool indirect;
6220
6221 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6222 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6223
6224 base_field = TYPE_FIELDS (va_list_type_node);
6225 offset_field = TREE_CHAIN (base_field);
6226 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6227 valist, base_field, NULL_TREE);
6228 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6229 valist, offset_field, NULL_TREE);
6230
6231 /* Pull the fields of the structure out into temporaries. Since we never
6232 modify the base field, we can use a formal temporary. Sign-extend the
6233 offset field so that it's the proper width for pointer arithmetic. */
6234 base = get_formal_tmp_var (base_field, pre_p);
6235
6236 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6237 offset = get_initialized_tmp_var (t, pre_p, NULL);
6238
6239 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6240 if (indirect)
6241 type = build_pointer_type (type);
6242
6243 /* Find the value. Note that this will be a stable indirection, or
6244 a composite of stable indirections in the case of complex. */
6245 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6246
6247 /* Stuff the offset temporary back into its field. */
6248 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6249 fold_convert (TREE_TYPE (offset_field), offset));
6250 gimplify_and_add (t, pre_p);
6251
6252 if (indirect)
6253 r = build_va_arg_indirect_ref (r);
6254
6255 return r;
6256 }
6257 \f
6258 /* Builtins. */
6259
6260 enum alpha_builtin
6261 {
6262 ALPHA_BUILTIN_CMPBGE,
6263 ALPHA_BUILTIN_EXTBL,
6264 ALPHA_BUILTIN_EXTWL,
6265 ALPHA_BUILTIN_EXTLL,
6266 ALPHA_BUILTIN_EXTQL,
6267 ALPHA_BUILTIN_EXTWH,
6268 ALPHA_BUILTIN_EXTLH,
6269 ALPHA_BUILTIN_EXTQH,
6270 ALPHA_BUILTIN_INSBL,
6271 ALPHA_BUILTIN_INSWL,
6272 ALPHA_BUILTIN_INSLL,
6273 ALPHA_BUILTIN_INSQL,
6274 ALPHA_BUILTIN_INSWH,
6275 ALPHA_BUILTIN_INSLH,
6276 ALPHA_BUILTIN_INSQH,
6277 ALPHA_BUILTIN_MSKBL,
6278 ALPHA_BUILTIN_MSKWL,
6279 ALPHA_BUILTIN_MSKLL,
6280 ALPHA_BUILTIN_MSKQL,
6281 ALPHA_BUILTIN_MSKWH,
6282 ALPHA_BUILTIN_MSKLH,
6283 ALPHA_BUILTIN_MSKQH,
6284 ALPHA_BUILTIN_UMULH,
6285 ALPHA_BUILTIN_ZAP,
6286 ALPHA_BUILTIN_ZAPNOT,
6287 ALPHA_BUILTIN_AMASK,
6288 ALPHA_BUILTIN_IMPLVER,
6289 ALPHA_BUILTIN_RPCC,
6290 ALPHA_BUILTIN_THREAD_POINTER,
6291 ALPHA_BUILTIN_SET_THREAD_POINTER,
6292
6293 /* TARGET_MAX */
6294 ALPHA_BUILTIN_MINUB8,
6295 ALPHA_BUILTIN_MINSB8,
6296 ALPHA_BUILTIN_MINUW4,
6297 ALPHA_BUILTIN_MINSW4,
6298 ALPHA_BUILTIN_MAXUB8,
6299 ALPHA_BUILTIN_MAXSB8,
6300 ALPHA_BUILTIN_MAXUW4,
6301 ALPHA_BUILTIN_MAXSW4,
6302 ALPHA_BUILTIN_PERR,
6303 ALPHA_BUILTIN_PKLB,
6304 ALPHA_BUILTIN_PKWB,
6305 ALPHA_BUILTIN_UNPKBL,
6306 ALPHA_BUILTIN_UNPKBW,
6307
6308 /* TARGET_CIX */
6309 ALPHA_BUILTIN_CTTZ,
6310 ALPHA_BUILTIN_CTLZ,
6311 ALPHA_BUILTIN_CTPOP,
6312
6313 ALPHA_BUILTIN_max
6314 };
6315
6316 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6317 CODE_FOR_builtin_cmpbge,
6318 CODE_FOR_builtin_extbl,
6319 CODE_FOR_builtin_extwl,
6320 CODE_FOR_builtin_extll,
6321 CODE_FOR_builtin_extql,
6322 CODE_FOR_builtin_extwh,
6323 CODE_FOR_builtin_extlh,
6324 CODE_FOR_builtin_extqh,
6325 CODE_FOR_builtin_insbl,
6326 CODE_FOR_builtin_inswl,
6327 CODE_FOR_builtin_insll,
6328 CODE_FOR_builtin_insql,
6329 CODE_FOR_builtin_inswh,
6330 CODE_FOR_builtin_inslh,
6331 CODE_FOR_builtin_insqh,
6332 CODE_FOR_builtin_mskbl,
6333 CODE_FOR_builtin_mskwl,
6334 CODE_FOR_builtin_mskll,
6335 CODE_FOR_builtin_mskql,
6336 CODE_FOR_builtin_mskwh,
6337 CODE_FOR_builtin_msklh,
6338 CODE_FOR_builtin_mskqh,
6339 CODE_FOR_umuldi3_highpart,
6340 CODE_FOR_builtin_zap,
6341 CODE_FOR_builtin_zapnot,
6342 CODE_FOR_builtin_amask,
6343 CODE_FOR_builtin_implver,
6344 CODE_FOR_builtin_rpcc,
6345 CODE_FOR_load_tp,
6346 CODE_FOR_set_tp,
6347
6348 /* TARGET_MAX */
6349 CODE_FOR_builtin_minub8,
6350 CODE_FOR_builtin_minsb8,
6351 CODE_FOR_builtin_minuw4,
6352 CODE_FOR_builtin_minsw4,
6353 CODE_FOR_builtin_maxub8,
6354 CODE_FOR_builtin_maxsb8,
6355 CODE_FOR_builtin_maxuw4,
6356 CODE_FOR_builtin_maxsw4,
6357 CODE_FOR_builtin_perr,
6358 CODE_FOR_builtin_pklb,
6359 CODE_FOR_builtin_pkwb,
6360 CODE_FOR_builtin_unpkbl,
6361 CODE_FOR_builtin_unpkbw,
6362
6363 /* TARGET_CIX */
6364 CODE_FOR_ctzdi2,
6365 CODE_FOR_clzdi2,
6366 CODE_FOR_popcountdi2
6367 };
6368
6369 struct alpha_builtin_def
6370 {
6371 const char *name;
6372 enum alpha_builtin code;
6373 unsigned int target_mask;
6374 bool is_const;
6375 };
6376
6377 static struct alpha_builtin_def const zero_arg_builtins[] = {
6378 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6379 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6380 };
6381
6382 static struct alpha_builtin_def const one_arg_builtins[] = {
6383 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6384 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6385 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6386 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6387 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6388 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6389 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6390 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6391 };
6392
6393 static struct alpha_builtin_def const two_arg_builtins[] = {
6394 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6395 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6396 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6397 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6398 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6399 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6400 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6401 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6402 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6403 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6404 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6405 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6406 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6407 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6408 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6409 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6410 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6411 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6412 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6413 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6414 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6415 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6416 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6417 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6418 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6419 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6420 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6421 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6422 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6423 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6424 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6425 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6426 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6427 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6428 };
6429
6430 static GTY(()) tree alpha_v8qi_u;
6431 static GTY(()) tree alpha_v8qi_s;
6432 static GTY(()) tree alpha_v4hi_u;
6433 static GTY(()) tree alpha_v4hi_s;
6434
6435 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6436 functions pointed to by P, with function type FTYPE. */
6437
6438 static void
6439 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6440 tree ftype)
6441 {
6442 tree decl;
6443 size_t i;
6444
6445 for (i = 0; i < count; ++i, ++p)
6446 if ((target_flags & p->target_mask) == p->target_mask)
6447 {
6448 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6449 NULL, NULL);
6450 if (p->is_const)
6451 TREE_READONLY (decl) = 1;
6452 TREE_NOTHROW (decl) = 1;
6453 }
6454 }
6455
6456
6457 static void
6458 alpha_init_builtins (void)
6459 {
6460 tree dimode_integer_type_node;
6461 tree ftype, decl;
6462
6463 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6464
6465 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6466 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6467 ftype);
6468
6469 ftype = build_function_type_list (dimode_integer_type_node,
6470 dimode_integer_type_node, NULL_TREE);
6471 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6472 ftype);
6473
6474 ftype = build_function_type_list (dimode_integer_type_node,
6475 dimode_integer_type_node,
6476 dimode_integer_type_node, NULL_TREE);
6477 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6478 ftype);
6479
6480 ftype = build_function_type (ptr_type_node, void_list_node);
6481 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6482 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6483 NULL, NULL);
6484 TREE_NOTHROW (decl) = 1;
6485
6486 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6487 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6488 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6489 NULL, NULL);
6490 TREE_NOTHROW (decl) = 1;
6491
6492 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6493 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6494 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6495 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6496 }
6497
6498 /* Expand an expression EXP that calls a built-in function,
6499 with result going to TARGET if that's convenient
6500 (and in mode MODE if that's convenient).
6501 SUBTARGET may be used as the target for computing one of EXP's operands.
6502 IGNORE is nonzero if the value is to be ignored. */
6503
6504 static rtx
6505 alpha_expand_builtin (tree exp, rtx target,
6506 rtx subtarget ATTRIBUTE_UNUSED,
6507 enum machine_mode mode ATTRIBUTE_UNUSED,
6508 int ignore ATTRIBUTE_UNUSED)
6509 {
6510 #define MAX_ARGS 2
6511
6512 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6513 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6514 tree arg;
6515 call_expr_arg_iterator iter;
6516 enum insn_code icode;
6517 rtx op[MAX_ARGS], pat;
6518 int arity;
6519 bool nonvoid;
6520
6521 if (fcode >= ALPHA_BUILTIN_max)
6522 internal_error ("bad builtin fcode");
6523 icode = code_for_builtin[fcode];
6524 if (icode == 0)
6525 internal_error ("bad builtin fcode");
6526
6527 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6528
6529 arity = 0;
6530 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6531 {
6532 const struct insn_operand_data *insn_op;
6533
6534 if (arg == error_mark_node)
6535 return NULL_RTX;
6536 if (arity >= MAX_ARGS)
6537 return NULL_RTX;
6538
6539 insn_op = &insn_data[icode].operand[arity + nonvoid];
6540
6541 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6542
6543 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6544 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6545 arity++;
6546 }
6547
6548 if (nonvoid)
6549 {
6550 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6551 if (!target
6552 || GET_MODE (target) != tmode
6553 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6554 target = gen_reg_rtx (tmode);
6555 }
6556
6557 switch (arity)
6558 {
6559 case 0:
6560 pat = GEN_FCN (icode) (target);
6561 break;
6562 case 1:
6563 if (nonvoid)
6564 pat = GEN_FCN (icode) (target, op[0]);
6565 else
6566 pat = GEN_FCN (icode) (op[0]);
6567 break;
6568 case 2:
6569 pat = GEN_FCN (icode) (target, op[0], op[1]);
6570 break;
6571 default:
6572 gcc_unreachable ();
6573 }
6574 if (!pat)
6575 return NULL_RTX;
6576 emit_insn (pat);
6577
6578 if (nonvoid)
6579 return target;
6580 else
6581 return const0_rtx;
6582 }
6583
6584
6585 /* Several bits below assume HWI >= 64 bits. This should be enforced
6586 by config.gcc. */
6587 #if HOST_BITS_PER_WIDE_INT < 64
6588 # error "HOST_WIDE_INT too small"
6589 #endif
6590
6591 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6592 with an 8-bit output vector. OPINT contains the integer operands; bit N
6593 of OP_CONST is set if OPINT[N] is valid. */
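/* For example, __builtin_alpha_cmpbge (x, 0) always folds to 0xff,
   since every unsigned byte compares >= 0; that is the op_const == 2
   special case below.  */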
6594
6595 static tree
6596 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6597 {
6598 if (op_const == 3)
6599 {
6600 int i, val;
6601 for (i = 0, val = 0; i < 8; ++i)
6602 {
6603 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6604 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6605 if (c0 >= c1)
6606 val |= 1 << i;
6607 }
6608 return build_int_cst (long_integer_type_node, val);
6609 }
6610 else if (op_const == 2 && opint[1] == 0)
6611 return build_int_cst (long_integer_type_node, 0xff);
6612 return NULL;
6613 }
6614
6615 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6616 specialized form of an AND operation. Other byte manipulation instructions
6617 are defined in terms of this instruction, so this is also used as a
6618 subroutine for other builtins.
6619
6620 OP contains the tree operands; OPINT contains the extracted integer values.
6621 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6622 OPINT is to be considered. */
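/* For example, zapnot (x, 0x0f) keeps the low four bytes, folding to
   x & 0xffffffff; zap is handled by inverting the byte mask first
   (see alpha_fold_builtin below).  */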
6623
6624 static tree
6625 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6626 long op_const)
6627 {
6628 if (op_const & 2)
6629 {
6630 unsigned HOST_WIDE_INT mask = 0;
6631 int i;
6632
6633 for (i = 0; i < 8; ++i)
6634 if ((opint[1] >> i) & 1)
6635 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6636
6637 if (op_const & 1)
6638 return build_int_cst (long_integer_type_node, opint[0] & mask);
6639
6640 if (op)
6641 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6642 build_int_cst (long_integer_type_node, mask));
6643 }
6644 else if ((op_const & 1) && opint[0] == 0)
6645 return build_int_cst (long_integer_type_node, 0);
6646 return NULL;
6647 }
6648
6649 /* Fold the builtins for the EXT family of instructions. */
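/* E.g. with both operands constant and little-endian byte numbering,
   extbl (x, y) folds to (x >> ((y & 7) * 8)) & 0xff; the wider forms
   differ only in BYTEMASK, and the *h variants shift left instead.  */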
6650
6651 static tree
6652 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6653 long op_const, unsigned HOST_WIDE_INT bytemask,
6654 bool is_high)
6655 {
6656 long zap_const = 2;
6657 tree *zap_op = NULL;
6658
6659 if (op_const & 2)
6660 {
6661 unsigned HOST_WIDE_INT loc;
6662
6663 loc = opint[1] & 7;
6664 if (BYTES_BIG_ENDIAN)
6665 loc ^= 7;
6666 loc *= 8;
6667
6668 if (loc != 0)
6669 {
6670 if (op_const & 1)
6671 {
6672 unsigned HOST_WIDE_INT temp = opint[0];
6673 if (is_high)
6674 temp <<= loc;
6675 else
6676 temp >>= loc;
6677 opint[0] = temp;
6678 zap_const = 3;
6679 }
6680 }
6681 else
6682 zap_op = op;
6683 }
6684
6685 opint[1] = bytemask;
6686 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6687 }
6688
6689 /* Fold the builtins for the INS family of instructions. */
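/* E.g. with both operands constant and little-endian byte numbering,
   insbl (x, y) folds to (x & 0xff) << ((y & 7) * 8): the low byte of
   X is shifted into byte position Y and everything else is zeroed.  */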
6690
6691 static tree
6692 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6693 long op_const, unsigned HOST_WIDE_INT bytemask,
6694 bool is_high)
6695 {
6696 if ((op_const & 1) && opint[0] == 0)
6697 return build_int_cst (long_integer_type_node, 0);
6698
6699 if (op_const & 2)
6700 {
6701 unsigned HOST_WIDE_INT temp, loc, byteloc;
6702 tree *zap_op = NULL;
6703
6704 loc = opint[1] & 7;
6705 if (BYTES_BIG_ENDIAN)
6706 loc ^= 7;
6707 bytemask <<= loc;
6708
6709 temp = opint[0];
6710 if (is_high)
6711 {
6712 byteloc = (64 - (loc * 8)) & 0x3f;
6713 if (byteloc == 0)
6714 zap_op = op;
6715 else
6716 temp >>= byteloc;
6717 bytemask >>= 8;
6718 }
6719 else
6720 {
6721 byteloc = loc * 8;
6722 if (byteloc == 0)
6723 zap_op = op;
6724 else
6725 temp <<= byteloc;
6726 }
6727
6728 opint[0] = temp;
6729 opint[1] = bytemask;
6730 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6731 }
6732
6733 return NULL;
6734 }
6735
6736 static tree
6737 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6738 long op_const, unsigned HOST_WIDE_INT bytemask,
6739 bool is_high)
6740 {
6741 if (op_const & 2)
6742 {
6743 unsigned HOST_WIDE_INT loc;
6744
6745 loc = opint[1] & 7;
6746 if (BYTES_BIG_ENDIAN)
6747 loc ^= 7;
6748 bytemask <<= loc;
6749
6750 if (is_high)
6751 bytemask >>= 8;
6752
6753 opint[1] = bytemask ^ 0xff;
6754 }
6755
6756 return alpha_fold_builtin_zapnot (op, opint, op_const);
6757 }
6758
6759 static tree
6760 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6761 {
6762 switch (op_const)
6763 {
6764 case 3:
6765 {
6766 unsigned HOST_WIDE_INT l;
6767 HOST_WIDE_INT h;
6768
6769 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6770
6771 #if HOST_BITS_PER_WIDE_INT > 64
6772 # error fixme
6773 #endif
6774
6775 return build_int_cst (long_integer_type_node, h);
6776 }
6777
6778 case 1:
6779 opint[1] = opint[0];
6780 /* FALLTHRU */
6781 case 2:
6782 /* Note that (X*1) >> 64 == 0. */
6783 if (opint[1] == 0 || opint[1] == 1)
6784 return build_int_cst (long_integer_type_node, 0);
6785 break;
6786 }
6787 return NULL;
6788 }
6789
6790 static tree
6791 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6792 {
6793 tree op0 = fold_convert (vtype, op[0]);
6794 tree op1 = fold_convert (vtype, op[1]);
6795 tree val = fold_build2 (code, vtype, op0, op1);
6796 return fold_convert (long_integer_type_node, val);
6797 }
6798
6799 static tree
6800 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6801 {
6802 unsigned HOST_WIDE_INT temp = 0;
6803 int i;
6804
6805 if (op_const != 3)
6806 return NULL;
6807
6808 for (i = 0; i < 8; ++i)
6809 {
6810 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6811 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6812 if (a >= b)
6813 temp += a - b;
6814 else
6815 temp += b - a;
6816 }
6817
6818 return build_int_cst (long_integer_type_node, temp);
6819 }
6820
6821 static tree
6822 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6823 {
6824 unsigned HOST_WIDE_INT temp;
6825
6826 if (op_const == 0)
6827 return NULL;
6828
6829 temp = opint[0] & 0xff;
6830 temp |= (opint[0] >> 24) & 0xff00;
6831
6832 return build_int_cst (long_integer_type_node, temp);
6833 }
6834
6835 static tree
6836 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6837 {
6838 unsigned HOST_WIDE_INT temp;
6839
6840 if (op_const == 0)
6841 return NULL;
6842
6843 temp = opint[0] & 0xff;
6844 temp |= (opint[0] >> 8) & 0xff00;
6845 temp |= (opint[0] >> 16) & 0xff0000;
6846 temp |= (opint[0] >> 24) & 0xff000000;
6847
6848 return build_int_cst (long_integer_type_node, temp);
6849 }
6850
6851 static tree
6852 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6853 {
6854 unsigned HOST_WIDE_INT temp;
6855
6856 if (op_const == 0)
6857 return NULL;
6858
6859 temp = opint[0] & 0xff;
6860 temp |= (opint[0] & 0xff00) << 24;
6861
6862 return build_int_cst (long_integer_type_node, temp);
6863 }
6864
6865 static tree
6866 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6867 {
6868 unsigned HOST_WIDE_INT temp;
6869
6870 if (op_const == 0)
6871 return NULL;
6872
6873 temp = opint[0] & 0xff;
6874 temp |= (opint[0] & 0x0000ff00) << 8;
6875 temp |= (opint[0] & 0x00ff0000) << 16;
6876 temp |= (opint[0] & 0xff000000) << 24;
6877
6878 return build_int_cst (long_integer_type_node, temp);
6879 }
6880
6881 static tree
6882 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6883 {
6884 unsigned HOST_WIDE_INT temp;
6885
6886 if (op_const == 0)
6887 return NULL;
6888
6889 if (opint[0] == 0)
6890 temp = 64;
6891 else
6892 temp = exact_log2 (opint[0] & -opint[0]);
6893
6894 return build_int_cst (long_integer_type_node, temp);
6895 }
6896
6897 static tree
6898 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6899 {
6900 unsigned HOST_WIDE_INT temp;
6901
6902 if (op_const == 0)
6903 return NULL;
6904
6905 if (opint[0] == 0)
6906 temp = 64;
6907 else
6908 temp = 64 - floor_log2 (opint[0]) - 1;
6909
6910 return build_int_cst (long_integer_type_node, temp);
6911 }
6912
6913 static tree
6914 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6915 {
6916 unsigned HOST_WIDE_INT temp, op;
6917
6918 if (op_const == 0)
6919 return NULL;
6920
6921 op = opint[0];
6922 temp = 0;
6923 while (op)
6924 temp++, op &= op - 1;
6925
6926 return build_int_cst (long_integer_type_node, temp);
6927 }
6928
6929 /* Fold one of our builtin functions. */
6930
6931 static tree
6932 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6933 {
6934 tree op[MAX_ARGS], t;
6935 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6936 long op_const = 0, arity = 0;
6937
6938 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6939 {
6940 tree arg = TREE_VALUE (t);
6941 if (arg == error_mark_node)
6942 return NULL;
6943 if (arity >= MAX_ARGS)
6944 return NULL;
6945
6946 op[arity] = arg;
6947 opint[arity] = 0;
6948 if (TREE_CODE (arg) == INTEGER_CST)
6949 {
6950 op_const |= 1L << arity;
6951 opint[arity] = int_cst_value (arg);
6952 }
6953 }
6954
6955 switch (DECL_FUNCTION_CODE (fndecl))
6956 {
6957 case ALPHA_BUILTIN_CMPBGE:
6958 return alpha_fold_builtin_cmpbge (opint, op_const);
6959
6960 case ALPHA_BUILTIN_EXTBL:
6961 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6962 case ALPHA_BUILTIN_EXTWL:
6963 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6964 case ALPHA_BUILTIN_EXTLL:
6965 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6966 case ALPHA_BUILTIN_EXTQL:
6967 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6968 case ALPHA_BUILTIN_EXTWH:
6969 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6970 case ALPHA_BUILTIN_EXTLH:
6971 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6972 case ALPHA_BUILTIN_EXTQH:
6973 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6974
6975 case ALPHA_BUILTIN_INSBL:
6976 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6977 case ALPHA_BUILTIN_INSWL:
6978 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6979 case ALPHA_BUILTIN_INSLL:
6980 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6981 case ALPHA_BUILTIN_INSQL:
6982 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6983 case ALPHA_BUILTIN_INSWH:
6984 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6985 case ALPHA_BUILTIN_INSLH:
6986 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6987 case ALPHA_BUILTIN_INSQH:
6988 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6989
6990 case ALPHA_BUILTIN_MSKBL:
6991 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6992 case ALPHA_BUILTIN_MSKWL:
6993 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6994 case ALPHA_BUILTIN_MSKLL:
6995 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6996 case ALPHA_BUILTIN_MSKQL:
6997 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6998 case ALPHA_BUILTIN_MSKWH:
6999 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7000 case ALPHA_BUILTIN_MSKLH:
7001 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7002 case ALPHA_BUILTIN_MSKQH:
7003 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7004
7005 case ALPHA_BUILTIN_UMULH:
7006 return alpha_fold_builtin_umulh (opint, op_const);
7007
7008 case ALPHA_BUILTIN_ZAP:
7009 opint[1] ^= 0xff;
7010 /* FALLTHRU */
7011 case ALPHA_BUILTIN_ZAPNOT:
7012 return alpha_fold_builtin_zapnot (op, opint, op_const);
7013
7014 case ALPHA_BUILTIN_MINUB8:
7015 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7016 case ALPHA_BUILTIN_MINSB8:
7017 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7018 case ALPHA_BUILTIN_MINUW4:
7019 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7020 case ALPHA_BUILTIN_MINSW4:
7021 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7022 case ALPHA_BUILTIN_MAXUB8:
7023 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7024 case ALPHA_BUILTIN_MAXSB8:
7025 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7026 case ALPHA_BUILTIN_MAXUW4:
7027 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7028 case ALPHA_BUILTIN_MAXSW4:
7029 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7030
7031 case ALPHA_BUILTIN_PERR:
7032 return alpha_fold_builtin_perr (opint, op_const);
7033 case ALPHA_BUILTIN_PKLB:
7034 return alpha_fold_builtin_pklb (opint, op_const);
7035 case ALPHA_BUILTIN_PKWB:
7036 return alpha_fold_builtin_pkwb (opint, op_const);
7037 case ALPHA_BUILTIN_UNPKBL:
7038 return alpha_fold_builtin_unpkbl (opint, op_const);
7039 case ALPHA_BUILTIN_UNPKBW:
7040 return alpha_fold_builtin_unpkbw (opint, op_const);
7041
7042 case ALPHA_BUILTIN_CTTZ:
7043 return alpha_fold_builtin_cttz (opint, op_const);
7044 case ALPHA_BUILTIN_CTLZ:
7045 return alpha_fold_builtin_ctlz (opint, op_const);
7046 case ALPHA_BUILTIN_CTPOP:
7047 return alpha_fold_builtin_ctpop (opint, op_const);
7048
7049 case ALPHA_BUILTIN_AMASK:
7050 case ALPHA_BUILTIN_IMPLVER:
7051 case ALPHA_BUILTIN_RPCC:
7052 case ALPHA_BUILTIN_THREAD_POINTER:
7053 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7054 /* None of these are foldable at compile-time. */
7055 default:
7056 return NULL;
7057 }
7058 }
7059 \f
7060 /* This page contains routines that are used to determine what the function
7061 prologue and epilogue code will do and write them out. */
7062
7063 /* Compute the size of the save area in the stack. */
7064
7065 /* These variables are used for communication between the following functions.
7066 They indicate various things about the current function being compiled
7067 that are used to tell what kind of prologue, epilogue and procedure
7068 descriptor to generate. */
7069
7070 /* Nonzero if we need a stack procedure. */
7071 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7072 static enum alpha_procedure_types alpha_procedure_type;
7073
7074 /* Register number (either FP or SP) that is used to unwind the frame. */
7075 static int vms_unwind_regno;
7076
7077 /* Register number used to save FP. We need not have one for RA since
7078 we don't modify it for register procedures. This is only defined
7079 for register frame procedures. */
7080 static int vms_save_fp_regno;
7081
7082 /* Register number used to reference objects off our PV. */
7083 static int vms_base_regno;
7084
7085 /* Compute register masks for saved registers. */
7086
7087 static void
7088 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7089 {
7090 unsigned long imask = 0;
7091 unsigned long fmask = 0;
7092 unsigned int i;
7093
7094 /* When outputting a thunk, we don't have valid register life info,
7095 but assemble_start_function wants to output .frame and .mask
7096 directives. */
7097 if (current_function_is_thunk)
7098 {
7099 *imaskP = 0;
7100 *fmaskP = 0;
7101 return;
7102 }
7103
7104 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7105 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7106
7107 /* One for every register we have to save. */
7108 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7109 if (! fixed_regs[i] && ! call_used_regs[i]
7110 && regs_ever_live[i] && i != REG_RA
7111 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7112 {
7113 if (i < 32)
7114 imask |= (1UL << i);
7115 else
7116 fmask |= (1UL << (i - 32));
7117 }
7118
7119 /* We need to restore these for the handler. */
7120 if (current_function_calls_eh_return)
7121 {
7122 for (i = 0; ; ++i)
7123 {
7124 unsigned regno = EH_RETURN_DATA_REGNO (i);
7125 if (regno == INVALID_REGNUM)
7126 break;
7127 imask |= 1UL << regno;
7128 }
7129 }
7130
7131 /* If any register was spilled, then spill the return address also. */
7132 /* ??? This is required by the Digital stack unwind specification
7133 and isn't needed if we're doing Dwarf2 unwinding. */
7134 if (imask || fmask || alpha_ra_ever_killed ())
7135 imask |= (1UL << REG_RA);
7136
7137 *imaskP = imask;
7138 *fmaskP = fmask;
7139 }
7140
7141 int
7142 alpha_sa_size (void)
7143 {
7144 unsigned long mask[2];
7145 int sa_size = 0;
7146 int i, j;
7147
7148 alpha_sa_mask (&mask[0], &mask[1]);
7149
7150 if (TARGET_ABI_UNICOSMK)
7151 {
7152 if (mask[0] || mask[1])
7153 sa_size = 14;
7154 }
7155 else
7156 {
7157 for (j = 0; j < 2; ++j)
7158 for (i = 0; i < 32; ++i)
7159 if ((mask[j] >> i) & 1)
7160 sa_size++;
7161 }
7162
7163 if (TARGET_ABI_UNICOSMK)
7164 {
7165 /* We might not need to generate a frame if we don't make any calls
7166 (including calls to __T3E_MISMATCH if this is a vararg function),
7167 don't have any local variables which require stack slots, don't
7168 use alloca and have not determined that we need a frame for other
7169 reasons. */
7170
7171 alpha_procedure_type
7172 = (sa_size || get_frame_size() != 0
7173 || current_function_outgoing_args_size
7174 || current_function_stdarg || current_function_calls_alloca
7175 || frame_pointer_needed)
7176 ? PT_STACK : PT_REGISTER;
7177
7178 /* Always reserve space for saving callee-saved registers if we
7179 need a frame as required by the calling convention. */
7180 if (alpha_procedure_type == PT_STACK)
7181 sa_size = 14;
7182 }
7183 else if (TARGET_ABI_OPEN_VMS)
7184 {
7185 /* Start by assuming we can use a register procedure if we don't
7186 make any calls (REG_RA not used) or need to save any
7187 registers and a stack procedure if we do. */
7188 if ((mask[0] >> REG_RA) & 1)
7189 alpha_procedure_type = PT_STACK;
7190 else if (get_frame_size() != 0)
7191 alpha_procedure_type = PT_REGISTER;
7192 else
7193 alpha_procedure_type = PT_NULL;
7194
7195 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7196 made the final decision on stack procedure vs register procedure. */
7197 if (alpha_procedure_type == PT_STACK)
7198 sa_size -= 2;
7199
7200 /* Decide whether to refer to objects off our PV via FP or PV.
7201 If we need FP for something else or if we receive a nonlocal
7202 goto (which expects PV to contain the value), we must use PV.
7203 Otherwise, start by assuming we can use FP. */
7204
7205 vms_base_regno
7206 = (frame_pointer_needed
7207 || current_function_has_nonlocal_label
7208 || alpha_procedure_type == PT_STACK
7209 || current_function_outgoing_args_size)
7210 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7211
7212 /* If we want to copy PV into FP, we need to find some register
7213 in which to save FP. */
7214
7215 vms_save_fp_regno = -1;
7216 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7217 for (i = 0; i < 32; i++)
7218 if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
7219 vms_save_fp_regno = i;
7220
7221 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7222 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7223 else if (alpha_procedure_type == PT_NULL)
7224 vms_base_regno = REG_PV;
7225
7226 /* Stack unwinding should be done via FP unless we use it for PV. */
7227 vms_unwind_regno = (vms_base_regno == REG_PV
7228 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7229
7230 /* If this is a stack procedure, allow space for saving FP and RA. */
7231 if (alpha_procedure_type == PT_STACK)
7232 sa_size += 2;
7233 }
7234 else
7235 {
7236 /* Our size must be even (multiple of 16 bytes). */
7237 if (sa_size & 1)
7238 sa_size++;
7239 }
7240
7241 return sa_size * 8;
7242 }
7243
7244 /* Define the offset between two registers, one to be eliminated,
7245 and the other its replacement, at the start of a routine. */
7246
7247 HOST_WIDE_INT
7248 alpha_initial_elimination_offset (unsigned int from,
7249 unsigned int to ATTRIBUTE_UNUSED)
7250 {
7251 HOST_WIDE_INT ret;
7252
7253 ret = alpha_sa_size ();
7254 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7255
7256 switch (from)
7257 {
7258 case FRAME_POINTER_REGNUM:
7259 break;
7260
7261 case ARG_POINTER_REGNUM:
7262 ret += (ALPHA_ROUND (get_frame_size ()
7263 + current_function_pretend_args_size)
7264 - current_function_pretend_args_size);
7265 break;
7266
7267 default:
7268 gcc_unreachable ();
7269 }
7270
7271 return ret;
7272 }
7273
7274 int
7275 alpha_pv_save_size (void)
7276 {
7277 alpha_sa_size ();
7278 return alpha_procedure_type == PT_STACK ? 8 : 0;
7279 }
7280
7281 int
7282 alpha_using_fp (void)
7283 {
7284 alpha_sa_size ();
7285 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7286 }
7287
7288 #if TARGET_ABI_OPEN_VMS
7289
7290 const struct attribute_spec vms_attribute_table[] =
7291 {
7292 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7293 { "overlaid", 0, 0, true, false, false, NULL },
7294 { "global", 0, 0, true, false, false, NULL },
7295 { "initialize", 0, 0, true, false, false, NULL },
7296 { NULL, 0, 0, false, false, false, NULL }
7297 };
7298
7299 #endif
7300
7301 static int
7302 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7303 {
7304 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7305 }
7306
7307 int
7308 alpha_find_lo_sum_using_gp (rtx insn)
7309 {
7310 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7311 }
7312
7313 static int
7314 alpha_does_function_need_gp (void)
7315 {
7316 rtx insn;
7317
7318 /* The GP being variable is an OSF abi thing. */
7319 if (! TARGET_ABI_OSF)
7320 return 0;
7321
7322 /* We need the gp to load the address of __mcount. */
7323 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7324 return 1;
7325
7326 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7327 if (current_function_is_thunk)
7328 return 1;
7329
7330 /* The nonlocal receiver pattern assumes that the gp is valid for
7331 the nested function. Reasonable because it's almost always set
7332 correctly already. For the cases where that's wrong, make sure
7333 the nested function loads its gp on entry. */
7334 if (current_function_has_nonlocal_goto)
7335 return 1;
7336
7337 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7338 Even if we are a static function, we still need to do this in case
7339 our address is taken and passed to something like qsort. */
7340
7341 push_topmost_sequence ();
7342 insn = get_insns ();
7343 pop_topmost_sequence ();
7344
7345 for (; insn; insn = NEXT_INSN (insn))
7346 if (INSN_P (insn)
7347 && ! JUMP_TABLE_DATA_P (insn)
7348 && GET_CODE (PATTERN (insn)) != USE
7349 && GET_CODE (PATTERN (insn)) != CLOBBER
7350 && get_attr_usegp (insn))
7351 return 1;
7352
7353 return 0;
7354 }
7355
7356 \f
7357 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7358 sequences. */
7359
7360 static rtx
7361 set_frame_related_p (void)
7362 {
7363 rtx seq = get_insns ();
7364 rtx insn;
7365
7366 end_sequence ();
7367
7368 if (!seq)
7369 return NULL_RTX;
7370
7371 if (INSN_P (seq))
7372 {
7373 insn = seq;
7374 while (insn != NULL_RTX)
7375 {
7376 RTX_FRAME_RELATED_P (insn) = 1;
7377 insn = NEXT_INSN (insn);
7378 }
7379 seq = emit_insn (seq);
7380 }
7381 else
7382 {
7383 seq = emit_insn (seq);
7384 RTX_FRAME_RELATED_P (seq) = 1;
7385 }
7386 return seq;
7387 }
7388
7389 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7390
7391 /* Generates a store with the proper unwind info attached. VALUE is
7392 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7393 contains SP+FRAME_BIAS, and that is the unwind info that should be
7394 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7395 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
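/* For example, the prologue below emits
       emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
   which stores $26 at sa_reg + reg_offset while the attached
   REG_FRAME_RELATED_EXPR note describes the slot as
   SP + sa_bias + reg_offset for the unwinder.  */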
7396
7397 static void
7398 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7399 HOST_WIDE_INT base_ofs, rtx frame_reg)
7400 {
7401 rtx addr, mem, insn;
7402
7403 addr = plus_constant (base_reg, base_ofs);
7404 mem = gen_rtx_MEM (DImode, addr);
7405 set_mem_alias_set (mem, alpha_sr_alias_set);
7406
7407 insn = emit_move_insn (mem, value);
7408 RTX_FRAME_RELATED_P (insn) = 1;
7409
7410 if (frame_bias || value != frame_reg)
7411 {
7412 if (frame_bias)
7413 {
7414 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7415 mem = gen_rtx_MEM (DImode, addr);
7416 }
7417
7418 REG_NOTES (insn)
7419 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7420 gen_rtx_SET (VOIDmode, mem, frame_reg),
7421 REG_NOTES (insn));
7422 }
7423 }
7424
7425 static void
7426 emit_frame_store (unsigned int regno, rtx base_reg,
7427 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7428 {
7429 rtx reg = gen_rtx_REG (DImode, regno);
7430 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7431 }
7432
7433 /* Write function prologue. */
7434
7435 /* On VMS we have two kinds of functions:
7436
7437 - stack frame (PROC_STACK)
7438 these are 'normal' functions with local vars and which call
7439 other functions
7440 - register frame (PROC_REGISTER)
7441 keeps all data in registers, needs no stack
7442
7443 We must pass this to the assembler so it can generate the
7444 proper pdsc (procedure descriptor)
7445 This is done with the '.pdesc' command.
7446
7447 On not-vms, we don't really differentiate between the two, as we can
7448 simply allocate stack without saving registers. */
7449
7450 void
7451 alpha_expand_prologue (void)
7452 {
7453 /* Registers to save. */
7454 unsigned long imask = 0;
7455 unsigned long fmask = 0;
7456 /* Stack space needed for pushing registers clobbered by us. */
7457 HOST_WIDE_INT sa_size;
7458 /* Complete stack size needed. */
7459 HOST_WIDE_INT frame_size;
7460 /* Offset from base reg to register save area. */
7461 HOST_WIDE_INT reg_offset;
7462 rtx sa_reg;
7463 int i;
7464
7465 sa_size = alpha_sa_size ();
7466
7467 frame_size = get_frame_size ();
7468 if (TARGET_ABI_OPEN_VMS)
7469 frame_size = ALPHA_ROUND (sa_size
7470 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7471 + frame_size
7472 + current_function_pretend_args_size);
7473 else if (TARGET_ABI_UNICOSMK)
7474 /* We have to allocate space for the DSIB if we generate a frame. */
7475 frame_size = ALPHA_ROUND (sa_size
7476 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7477 + ALPHA_ROUND (frame_size
7478 + current_function_outgoing_args_size);
7479 else
7480 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7481 + sa_size
7482 + ALPHA_ROUND (frame_size
7483 + current_function_pretend_args_size));
7484
7485 if (TARGET_ABI_OPEN_VMS)
7486 reg_offset = 8;
7487 else
7488 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7489
7490 alpha_sa_mask (&imask, &fmask);
7491
7492 /* Emit an insn to reload GP, if needed. */
7493 if (TARGET_ABI_OSF)
7494 {
7495 alpha_function_needs_gp = alpha_does_function_need_gp ();
7496 if (alpha_function_needs_gp)
7497 emit_insn (gen_prologue_ldgp ());
7498 }
7499
7500 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7501 the call to mcount ourselves, rather than having the linker do it
7502 magically in response to -pg. Since _mcount has special linkage,
7503 don't represent the call as a call. */
7504 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7505 emit_insn (gen_prologue_mcount ());
7506
7507 if (TARGET_ABI_UNICOSMK)
7508 unicosmk_gen_dsib (&imask);
7509
7510 /* Adjust the stack by the frame size. If the frame size is > 4096
7511 bytes, we need to be sure we probe somewhere in the first and last
7512 4096 bytes (we can probably get away without the latter test) and
7513 every 8192 bytes in between. If the frame size is > 32768, we
7514 do this in a loop. Otherwise, we generate the explicit probe
7515 instructions.
7516
7517 Note that we are only allowed to adjust sp once in the prologue. */
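/* As an illustration of the explicit-probe case (ignoring the
   Unicos/Mk bias of 64): a frame of 20000 bytes with no saved
   registers gets probes at SP-4096, SP-12288 and SP-20000 before the
   single SP adjustment.  */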
7518
7519 if (frame_size <= 32768)
7520 {
7521 if (frame_size > 4096)
7522 {
7523 int probed;
7524
7525 for (probed = 4096; probed < frame_size; probed += 8192)
7526 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7527 ? -probed + 64
7528 : -probed)));
7529
7530 /* We only have to do this probe if we aren't saving registers. */
7531 if (sa_size == 0 && frame_size > probed - 4096)
7532 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7533 }
7534
7535 if (frame_size != 0)
7536 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7537 GEN_INT (TARGET_ABI_UNICOSMK
7538 ? -frame_size + 64
7539 : -frame_size))));
7540 }
7541 else
7542 {
7543 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7544 number of 8192 byte blocks to probe. We then probe each block
7545 in the loop and then set SP to the proper location. If the
7546 amount remaining is > 4096, we have to do one more probe if we
7547 are not saving any registers. */
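/* E.g. frame_size == 100000 gives blocks = (100000 + 4096) / 8192 = 12
   and leftover = 104096 - 12*8192 = 5792; leftover > 4096, so the
   extra probe below is emitted when no registers are being saved.  */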
7548
7549 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7550 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7551 rtx ptr = gen_rtx_REG (DImode, 22);
7552 rtx count = gen_rtx_REG (DImode, 23);
7553 rtx seq;
7554
7555 emit_move_insn (count, GEN_INT (blocks));
7556 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7557 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7558
7559 /* Because of the difficulty in emitting a new basic block this
7560 late in the compilation, generate the loop as a single insn. */
7561 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7562
7563 if (leftover > 4096 && sa_size == 0)
7564 {
7565 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7566 MEM_VOLATILE_P (last) = 1;
7567 emit_move_insn (last, const0_rtx);
7568 }
7569
7570 if (TARGET_ABI_WINDOWS_NT)
7571 {
7572 /* For NT stack unwind (done by 'reverse execution'), it's
7573 not OK to take the result of a loop, even though the value
7574 is already in ptr, so we reload it via a single operation
7575 and subtract it from sp.
7576
7577 Yes, that's correct -- we have to reload the whole constant
7578 into a temporary via ldah+lda then subtract from sp. */
7579
7580 HOST_WIDE_INT lo, hi;
7581 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7582 hi = frame_size - lo;
7583
7584 emit_move_insn (ptr, GEN_INT (hi));
7585 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7586 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7587 ptr));
7588 }
7589 else
7590 {
7591 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7592 GEN_INT (-leftover)));
7593 }
7594
7595 /* This alternative is special, because the DWARF code cannot
7596 possibly intuit through the loop above. So we invent this
7597 note for it to look at instead. */
7598 RTX_FRAME_RELATED_P (seq) = 1;
7599 REG_NOTES (seq)
7600 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7601 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7602 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7603 GEN_INT (TARGET_ABI_UNICOSMK
7604 ? -frame_size + 64
7605 : -frame_size))),
7606 REG_NOTES (seq));
7607 }
7608
7609 if (!TARGET_ABI_UNICOSMK)
7610 {
7611 HOST_WIDE_INT sa_bias = 0;
7612
7613 /* Cope with very large offsets to the register save area. */
7614 sa_reg = stack_pointer_rtx;
7615 if (reg_offset + sa_size > 0x8000)
7616 {
7617 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7618 rtx sa_bias_rtx;
7619
7620 if (low + sa_size <= 0x8000)
7621 sa_bias = reg_offset - low, reg_offset = low;
7622 else
7623 sa_bias = reg_offset, reg_offset = 0;
7624
7625 sa_reg = gen_rtx_REG (DImode, 24);
7626 sa_bias_rtx = GEN_INT (sa_bias);
7627
7628 if (add_operand (sa_bias_rtx, DImode))
7629 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7630 else
7631 {
7632 emit_move_insn (sa_reg, sa_bias_rtx);
7633 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7634 }
7635 }
7636
7637 /* Save regs in stack order. Beginning with VMS PV. */
7638 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7639 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7640
7641 /* Save register RA next. */
7642 if (imask & (1UL << REG_RA))
7643 {
7644 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7645 imask &= ~(1UL << REG_RA);
7646 reg_offset += 8;
7647 }
7648
7649 /* Now save any other registers required to be saved. */
7650 for (i = 0; i < 31; i++)
7651 if (imask & (1UL << i))
7652 {
7653 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7654 reg_offset += 8;
7655 }
7656
7657 for (i = 0; i < 31; i++)
7658 if (fmask & (1UL << i))
7659 {
7660 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7661 reg_offset += 8;
7662 }
7663 }
7664 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7665 {
7666 /* The standard frame on the T3E includes space for saving registers.
7667 We just have to use it. We don't have to save the return address and
7668 the old frame pointer here - they are saved in the DSIB. */
7669
7670 reg_offset = -56;
7671 for (i = 9; i < 15; i++)
7672 if (imask & (1UL << i))
7673 {
7674 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7675 reg_offset -= 8;
7676 }
7677 for (i = 2; i < 10; i++)
7678 if (fmask & (1UL << i))
7679 {
7680 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7681 reg_offset -= 8;
7682 }
7683 }
7684
7685 if (TARGET_ABI_OPEN_VMS)
7686 {
7687 if (alpha_procedure_type == PT_REGISTER)
7688 /* Register frame procedures save the fp.
7689 ?? Ought to have a dwarf2 save for this. */
7690 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7691 hard_frame_pointer_rtx);
7692
7693 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7694 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7695 gen_rtx_REG (DImode, REG_PV)));
7696
7697 if (alpha_procedure_type != PT_NULL
7698 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7699 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7700
7701 /* If we have to allocate space for outgoing args, do it now. */
7702 if (current_function_outgoing_args_size != 0)
7703 {
7704 rtx seq
7705 = emit_move_insn (stack_pointer_rtx,
7706 plus_constant
7707 (hard_frame_pointer_rtx,
7708 - (ALPHA_ROUND
7709 (current_function_outgoing_args_size))));
7710
7711 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7712 if ! frame_pointer_needed. Setting the bit will change the CFA
7713 computation rule to use sp again, which would be wrong if we had
7714 frame_pointer_needed, as this means sp might move unpredictably
7715 later on.
7716
7717 Also, note that
7718 frame_pointer_needed
7719 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7720 and
7721 current_function_outgoing_args_size != 0
7722 => alpha_procedure_type != PT_NULL,
7723
7724 so when we are not setting the bit here, we are guaranteed to
7725 have emitted an FRP frame pointer update just before. */
7726 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7727 }
7728 }
7729 else if (!TARGET_ABI_UNICOSMK)
7730 {
7731 /* If we need a frame pointer, set it from the stack pointer. */
7732 if (frame_pointer_needed)
7733 {
7734 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7735 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7736 else
7737 /* This must always be the last instruction in the
7738 prologue, thus we emit a special move + clobber. */
7739 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7740 stack_pointer_rtx, sa_reg)));
7741 }
7742 }
7743
7744 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7745 the prologue, for exception handling reasons, we cannot do this for
7746 any insn that might fault. We could prevent this for mems with a
7747 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7748 have to prevent all such scheduling with a blockage.
7749
7750 Linux, on the other hand, never bothered to implement OSF/1's
7751 exception handling, and so doesn't care about such things. Anyone
7752 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7753
7754 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7755 emit_insn (gen_blockage ());
7756 }
7757
7758 /* Count the number of .file directives, so that .loc is up to date. */
7759 int num_source_filenames = 0;
7760
7761 /* Output the textual info surrounding the prologue. */
7762
7763 void
7764 alpha_start_function (FILE *file, const char *fnname,
7765 tree decl ATTRIBUTE_UNUSED)
7766 {
7767 unsigned long imask = 0;
7768 unsigned long fmask = 0;
7769 /* Stack space needed for pushing registers clobbered by us. */
7770 HOST_WIDE_INT sa_size;
7771 /* Complete stack size needed. */
7772 unsigned HOST_WIDE_INT frame_size;
7773 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7774 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7775 ? 524288
7776 : 1UL << 31;
7777 /* Offset from base reg to register save area. */
7778 HOST_WIDE_INT reg_offset;
7779 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7780 int i;
7781
7782 /* Don't emit an extern directive for functions defined in the same file. */
7783 if (TARGET_ABI_UNICOSMK)
7784 {
7785 tree name_tree;
7786 name_tree = get_identifier (fnname);
7787 TREE_ASM_WRITTEN (name_tree) = 1;
7788 }
7789
7790 alpha_fnname = fnname;
7791 sa_size = alpha_sa_size ();
7792
7793 frame_size = get_frame_size ();
7794 if (TARGET_ABI_OPEN_VMS)
7795 frame_size = ALPHA_ROUND (sa_size
7796 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7797 + frame_size
7798 + current_function_pretend_args_size);
7799 else if (TARGET_ABI_UNICOSMK)
7800 frame_size = ALPHA_ROUND (sa_size
7801 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7802 + ALPHA_ROUND (frame_size
7803 + current_function_outgoing_args_size);
7804 else
7805 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7806 + sa_size
7807 + ALPHA_ROUND (frame_size
7808 + current_function_pretend_args_size));
7809
7810 if (TARGET_ABI_OPEN_VMS)
7811 reg_offset = 8;
7812 else
7813 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7814
7815 alpha_sa_mask (&imask, &fmask);
7816
7817 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7818 We have to do that before the .ent directive as we cannot switch
7819 files within procedures with native ecoff because line numbers are
7820 linked to procedure descriptors.
7821 Outputting the lineno helps debugging of one line functions as they
7822 would otherwise get no line number at all. Please note that we would
7823 like to put out last_linenum from final.c, but it is not accessible. */
7824
7825 if (write_symbols == SDB_DEBUG)
7826 {
7827 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7828 ASM_OUTPUT_SOURCE_FILENAME (file,
7829 DECL_SOURCE_FILE (current_function_decl));
7830 #endif
7831 #ifdef SDB_OUTPUT_SOURCE_LINE
7832 if (debug_info_level != DINFO_LEVEL_TERSE)
7833 SDB_OUTPUT_SOURCE_LINE (file,
7834 DECL_SOURCE_LINE (current_function_decl));
7835 #endif
7836 }
7837
7838 /* Issue function start and label. */
7839 if (TARGET_ABI_OPEN_VMS
7840 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7841 {
7842 fputs ("\t.ent ", file);
7843 assemble_name (file, fnname);
7844 putc ('\n', file);
7845
7846 /* If the function needs GP, we'll write the "..ng" label there.
7847 Otherwise, do it here. */
7848 if (TARGET_ABI_OSF
7849 && ! alpha_function_needs_gp
7850 && ! current_function_is_thunk)
7851 {
7852 putc ('$', file);
7853 assemble_name (file, fnname);
7854 fputs ("..ng:\n", file);
7855 }
7856 }
7857
7858 strcpy (entry_label, fnname);
7859 if (TARGET_ABI_OPEN_VMS)
7860 strcat (entry_label, "..en");
7861
7862 /* For public functions, the label must be globalized by appending an
7863 additional colon. */
7864 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7865 strcat (entry_label, ":");
7866
7867 ASM_OUTPUT_LABEL (file, entry_label);
7868 inside_function = TRUE;
7869
7870 if (TARGET_ABI_OPEN_VMS)
7871 fprintf (file, "\t.base $%d\n", vms_base_regno);
7872
7873 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7874 && !flag_inhibit_size_directive)
7875 {
7876 /* Set flags in procedure descriptor to request IEEE-conformant
7877 math-library routines. The value we set it to is PDSC_EXC_IEEE
7878 (/usr/include/pdsc.h). */
7879 fputs ("\t.eflag 48\n", file);
7880 }
7881
7882 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7883 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7884 alpha_arg_offset = -frame_size + 48;
7885
7886 /* Describe our frame. If the frame size is larger than an integer,
7887 print it as zero to avoid an assembler error. We won't be
7888 properly describing such a frame, but that's the best we can do. */
7889 if (TARGET_ABI_UNICOSMK)
7890 ;
7891 else if (TARGET_ABI_OPEN_VMS)
7892 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7893 HOST_WIDE_INT_PRINT_DEC "\n",
7894 vms_unwind_regno,
7895 frame_size >= (1UL << 31) ? 0 : frame_size,
7896 reg_offset);
7897 else if (!flag_inhibit_size_directive)
7898 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7899 (frame_pointer_needed
7900 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7901 frame_size >= max_frame_size ? 0 : frame_size,
7902 current_function_pretend_args_size);
7903
7904 /* Describe which registers were spilled. */
7905 if (TARGET_ABI_UNICOSMK)
7906 ;
7907 else if (TARGET_ABI_OPEN_VMS)
7908 {
7909 if (imask)
7910 /* ??? Does VMS care if mask contains ra? The old code didn't
7911 	         set it, so I don't set it here.  */
7912 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7913 if (fmask)
7914 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7915 if (alpha_procedure_type == PT_REGISTER)
7916 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7917 }
7918 else if (!flag_inhibit_size_directive)
7919 {
7920 if (imask)
7921 {
7922 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7923 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7924
7925 for (i = 0; i < 32; ++i)
7926 if (imask & (1UL << i))
7927 reg_offset += 8;
7928 }
7929
7930 if (fmask)
7931 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7932 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7933 }
7934
7935 #if TARGET_ABI_OPEN_VMS
7936 /* Ifdef'ed because link_section is only available then.  */
7937 switch_to_section (readonly_data_section);
7938 fprintf (file, "\t.align 3\n");
7939 assemble_name (file, fnname); fputs ("..na:\n", file);
7940 fputs ("\t.ascii \"", file);
7941 assemble_name (file, fnname);
7942 fputs ("\\0\"\n", file);
7943 alpha_need_linkage (fnname, 1);
7944 switch_to_section (text_section);
7945 #endif
7946 }
7947
7948 /* Emit the .prologue note at the scheduled end of the prologue. */
7949
7950 static void
7951 alpha_output_function_end_prologue (FILE *file)
7952 {
7953 if (TARGET_ABI_UNICOSMK)
7954 ;
7955 else if (TARGET_ABI_OPEN_VMS)
7956 fputs ("\t.prologue\n", file);
7957 else if (TARGET_ABI_WINDOWS_NT)
7958 fputs ("\t.prologue 0\n", file);
7959 else if (!flag_inhibit_size_directive)
7960 fprintf (file, "\t.prologue %d\n",
7961 alpha_function_needs_gp || current_function_is_thunk);
7962 }
7963
7964 /* Write function epilogue. */
7965
7966 /* ??? At some point we will want to support full unwind, and so will
7967 need to mark the epilogue as well. At the moment, we just confuse
7968 dwarf2out. */
7969 #undef FRP
7970 #define FRP(exp) exp
7971
7972 void
7973 alpha_expand_epilogue (void)
7974 {
7975 /* Registers to save. */
7976 unsigned long imask = 0;
7977 unsigned long fmask = 0;
7978 /* Stack space needed for pushing registers clobbered by us. */
7979 HOST_WIDE_INT sa_size;
7980 /* Complete stack size needed. */
7981 HOST_WIDE_INT frame_size;
7982 /* Offset from base reg to register save area. */
7983 HOST_WIDE_INT reg_offset;
7984 int fp_is_frame_pointer, fp_offset;
7985 rtx sa_reg, sa_reg_exp = NULL;
7986 rtx sp_adj1, sp_adj2, mem;
7987 rtx eh_ofs;
7988 int i;
7989
7990 sa_size = alpha_sa_size ();
7991
7992 frame_size = get_frame_size ();
7993 if (TARGET_ABI_OPEN_VMS)
7994 frame_size = ALPHA_ROUND (sa_size
7995 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7996 + frame_size
7997 + current_function_pretend_args_size);
7998 else if (TARGET_ABI_UNICOSMK)
7999 frame_size = ALPHA_ROUND (sa_size
8000 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8001 + ALPHA_ROUND (frame_size
8002 + current_function_outgoing_args_size);
8003 else
8004 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
8005 + sa_size
8006 + ALPHA_ROUND (frame_size
8007 + current_function_pretend_args_size));
8008
8009 if (TARGET_ABI_OPEN_VMS)
8010 {
8011 if (alpha_procedure_type == PT_STACK)
8012 reg_offset = 8;
8013 else
8014 reg_offset = 0;
8015 }
8016 else
8017 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8018
8019 alpha_sa_mask (&imask, &fmask);
8020
8021 fp_is_frame_pointer
8022 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8023 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8024 fp_offset = 0;
8025 sa_reg = stack_pointer_rtx;
8026
8027 if (current_function_calls_eh_return)
8028 eh_ofs = EH_RETURN_STACKADJ_RTX;
8029 else
8030 eh_ofs = NULL_RTX;
8031
8032 if (!TARGET_ABI_UNICOSMK && sa_size)
8033 {
8034 /* If we have a frame pointer, restore SP from it. */
8035 if ((TARGET_ABI_OPEN_VMS
8036 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8037 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8038 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8039
8040 /* Cope with very large offsets to the register save area. */
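      /* As a rough worked example (hypothetical values): with reg_offset == 0x9000
	 and sa_size == 0x100, low becomes ((0x9000 & 0xffff) ^ 0x8000) - 0x8000
	 == -0x7000, which still reaches the whole save area, so bias == 0x9000
	 - (-0x7000) == 0x10000 is added into $22 once and every restore below
	 then uses a 16-bit displacement from $22.  */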
8041 if (reg_offset + sa_size > 0x8000)
8042 {
8043 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8044 HOST_WIDE_INT bias;
8045
8046 if (low + sa_size <= 0x8000)
8047 bias = reg_offset - low, reg_offset = low;
8048 else
8049 bias = reg_offset, reg_offset = 0;
8050
8051 sa_reg = gen_rtx_REG (DImode, 22);
8052 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8053
8054 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8055 }
8056
8057 /* Restore registers in order, excepting a true frame pointer. */
8058
8059 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8060 if (! eh_ofs)
8061 set_mem_alias_set (mem, alpha_sr_alias_set);
8062 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8063
8064 reg_offset += 8;
8065 imask &= ~(1UL << REG_RA);
8066
8067 for (i = 0; i < 31; ++i)
8068 if (imask & (1UL << i))
8069 {
8070 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8071 fp_offset = reg_offset;
8072 else
8073 {
8074 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8075 set_mem_alias_set (mem, alpha_sr_alias_set);
8076 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8077 }
8078 reg_offset += 8;
8079 }
8080
8081 for (i = 0; i < 31; ++i)
8082 if (fmask & (1UL << i))
8083 {
8084 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8085 set_mem_alias_set (mem, alpha_sr_alias_set);
8086 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8087 reg_offset += 8;
8088 }
8089 }
8090 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8091 {
8092 /* Restore callee-saved general-purpose registers. */
8093
8094 reg_offset = -56;
8095
8096 for (i = 9; i < 15; i++)
8097 if (imask & (1UL << i))
8098 {
8099 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8100 reg_offset));
8101 set_mem_alias_set (mem, alpha_sr_alias_set);
8102 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8103 reg_offset -= 8;
8104 }
8105
8106 for (i = 2; i < 10; i++)
8107 if (fmask & (1UL << i))
8108 {
8109 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8110 reg_offset));
8111 set_mem_alias_set (mem, alpha_sr_alias_set);
8112 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8113 reg_offset -= 8;
8114 }
8115
8116 /* Restore the return address from the DSIB. */
8117
8118 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8119 set_mem_alias_set (mem, alpha_sr_alias_set);
8120 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8121 }
8122
8123 if (frame_size || eh_ofs)
8124 {
8125 sp_adj1 = stack_pointer_rtx;
8126
8127 if (eh_ofs)
8128 {
8129 sp_adj1 = gen_rtx_REG (DImode, 23);
8130 emit_move_insn (sp_adj1,
8131 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8132 }
8133
8134 /* If the stack size is large, begin computation into a temporary
8135 register so as not to interfere with a potential fp restore,
8136 which must be consecutive with an SP restore. */
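      /* For illustration (hypothetical size, non-Unicos/Mk): a frame_size of
	 0x12340 takes the < 0x40007fff branch below; low == 0x2340, so $23 is
	 first set to sp + 0x10000 and the final SP restore adds the remaining
	 0x2340.  */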
8137 if (frame_size < 32768
8138 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8139 sp_adj2 = GEN_INT (frame_size);
8140 else if (TARGET_ABI_UNICOSMK)
8141 {
8142 sp_adj1 = gen_rtx_REG (DImode, 23);
8143 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8144 sp_adj2 = const0_rtx;
8145 }
8146 else if (frame_size < 0x40007fffL)
8147 {
8148 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8149
8150 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8151 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8152 sp_adj1 = sa_reg;
8153 else
8154 {
8155 sp_adj1 = gen_rtx_REG (DImode, 23);
8156 FRP (emit_move_insn (sp_adj1, sp_adj2));
8157 }
8158 sp_adj2 = GEN_INT (low);
8159 }
8160 else
8161 {
8162 rtx tmp = gen_rtx_REG (DImode, 23);
8163 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8164 3, false));
8165 if (!sp_adj2)
8166 {
8167 /* We can't drop new things to memory this late, afaik,
8168 so build it up by pieces. */
8169 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8170 -(frame_size < 0)));
8171 gcc_assert (sp_adj2);
8172 }
8173 }
8174
8175 /* From now on, things must be in order. So emit blockages. */
8176
8177 /* Restore the frame pointer. */
8178 if (TARGET_ABI_UNICOSMK)
8179 {
8180 emit_insn (gen_blockage ());
8181 mem = gen_rtx_MEM (DImode,
8182 plus_constant (hard_frame_pointer_rtx, -16));
8183 set_mem_alias_set (mem, alpha_sr_alias_set);
8184 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8185 }
8186 else if (fp_is_frame_pointer)
8187 {
8188 emit_insn (gen_blockage ());
8189 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8190 set_mem_alias_set (mem, alpha_sr_alias_set);
8191 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8192 }
8193 else if (TARGET_ABI_OPEN_VMS)
8194 {
8195 emit_insn (gen_blockage ());
8196 FRP (emit_move_insn (hard_frame_pointer_rtx,
8197 gen_rtx_REG (DImode, vms_save_fp_regno)));
8198 }
8199
8200 /* Restore the stack pointer. */
8201 emit_insn (gen_blockage ());
8202 if (sp_adj2 == const0_rtx)
8203 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8204 else
8205 FRP (emit_move_insn (stack_pointer_rtx,
8206 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8207 }
8208 else
8209 {
8210 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8211 {
8212 emit_insn (gen_blockage ());
8213 FRP (emit_move_insn (hard_frame_pointer_rtx,
8214 gen_rtx_REG (DImode, vms_save_fp_regno)));
8215 }
8216 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8217 {
8218 /* Decrement the frame pointer if the function does not have a
8219 frame. */
8220
8221 emit_insn (gen_blockage ());
8222 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8223 hard_frame_pointer_rtx, constm1_rtx)));
8224 }
8225 }
8226 }
8227 \f
8228 /* Output the rest of the textual info surrounding the epilogue. */
8229
8230 void
8231 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8232 {
8233 rtx insn;
8234
8235 /* We output a nop after noreturn calls at the very end of the function to
8236 ensure that the return address always remains in the caller's code range,
8237 as not doing so might confuse unwinding engines. */
8238 insn = get_last_insn ();
8239 if (!INSN_P (insn))
8240 insn = prev_active_insn (insn);
8241 if (GET_CODE (insn) == CALL_INSN)
8242 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8243
8244 #if TARGET_ABI_OPEN_VMS
8245 alpha_write_linkage (file, fnname, decl);
8246 #endif
8247
8248 /* End the function. */
8249 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8250 {
8251 fputs ("\t.end ", file);
8252 assemble_name (file, fnname);
8253 putc ('\n', file);
8254 }
8255 inside_function = FALSE;
8256
8257 /* Output jump tables and the static subroutine information block. */
8258 if (TARGET_ABI_UNICOSMK)
8259 {
8260 unicosmk_output_ssib (file, fnname);
8261 unicosmk_output_deferred_case_vectors (file);
8262 }
8263 }
8264
8265 #if TARGET_ABI_OSF
8266 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8267
8268 In order to avoid the hordes of differences between generated code
8269 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8270 lots of code loading up large constants, generate rtl and emit it
8271 instead of going straight to text.
8272
8273 Not sure why this idea hasn't been explored before... */
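/* Roughly, the emitted thunk adds DELTA to the incoming `this' pointer (in
   $16, or $17 when the callee returns its result in memory), then, if
   VCALL_OFFSET is nonzero, further adds the quadword loaded from
   *(*this + VCALL_OFFSET), and finally tail-calls FUNCTION.  */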
8274
8275 static void
8276 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8277 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8278 tree function)
8279 {
8280 HOST_WIDE_INT hi, lo;
8281 rtx this, insn, funexp;
8282
8283 reset_block_changes ();
8284
8285 /* We always require a valid GP. */
8286 emit_insn (gen_prologue_ldgp ());
8287 emit_note (NOTE_INSN_PROLOGUE_END);
8288
8289 /* Find the "this" pointer. If the function returns a structure,
8290 the structure return pointer is in $16. */
8291 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8292 this = gen_rtx_REG (Pmode, 17);
8293 else
8294 this = gen_rtx_REG (Pmode, 16);
8295
8296 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8297 entire constant for the add. */
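  /* For instance (hypothetical values): delta == 0x12345678 splits into
     lo == 0x5678 and hi == 0x12340000, with hi + lo == delta, so two adds
     suffice; delta == 0x18000 splits into lo == -0x8000 and hi == 0x20000,
     which likewise sums back to delta.  */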
8298 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8299 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8300 if (hi + lo == delta)
8301 {
8302 if (hi)
8303 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8304 if (lo)
8305 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8306 }
8307 else
8308 {
8309 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8310 delta, -(delta < 0));
8311 emit_insn (gen_adddi3 (this, this, tmp));
8312 }
8313
8314 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8315 if (vcall_offset)
8316 {
8317 rtx tmp, tmp2;
8318
8319 tmp = gen_rtx_REG (Pmode, 0);
8320 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8321
8322 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8323 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8324 if (hi + lo == vcall_offset)
8325 {
8326 if (hi)
8327 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8328 }
8329 else
8330 {
8331 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8332 vcall_offset, -(vcall_offset < 0));
8333 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8334 lo = 0;
8335 }
8336 if (lo)
8337 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8338 else
8339 tmp2 = tmp;
8340 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8341
8342 emit_insn (gen_adddi3 (this, this, tmp));
8343 }
8344
8345 /* Generate a tail call to the target function. */
8346 if (! TREE_USED (function))
8347 {
8348 assemble_external (function);
8349 TREE_USED (function) = 1;
8350 }
8351 funexp = XEXP (DECL_RTL (function), 0);
8352 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8353 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8354 SIBLING_CALL_P (insn) = 1;
8355
8356 /* Run just enough of rest_of_compilation to get the insns emitted.
8357 There's not really enough bulk here to make other passes such as
8358 instruction scheduling worth while. Note that use_thunk calls
8359 assemble_start_function and assemble_end_function. */
8360 insn = get_insns ();
8361 insn_locators_initialize ();
8362 shorten_branches (insn);
8363 final_start_function (insn, file, 1);
8364 final (insn, file, 1);
8365 final_end_function ();
8366 }
8367 #endif /* TARGET_ABI_OSF */
8368 \f
8369 /* Debugging support. */
8370
8371 #include "gstab.h"
8372
8373 /* Count the number of sdb-related labels generated (to find block
8374    start and end boundaries).  */
8375
8376 int sdb_label_count = 0;
8377
8378 /* Name of the file containing the current function. */
8379
8380 static const char *current_function_file = "";
8381
8382 /* Offsets to alpha virtual arg/local debugging pointers. */
8383
8384 long alpha_arg_offset;
8385 long alpha_auto_offset;
8386 \f
8387 /* Emit a new filename to a stream. */
8388
8389 void
8390 alpha_output_filename (FILE *stream, const char *name)
8391 {
8392 static int first_time = TRUE;
8393
8394 if (first_time)
8395 {
8396 first_time = FALSE;
8397 ++num_source_filenames;
8398 current_function_file = name;
8399 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8400 output_quoted_string (stream, name);
8401 fprintf (stream, "\n");
8402 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8403 fprintf (stream, "\t#@stabs\n");
8404 }
8405
8406 else if (write_symbols == DBX_DEBUG)
8407 /* dbxout.c will emit an appropriate .stabs directive. */
8408 return;
8409
8410 else if (name != current_function_file
8411 && strcmp (name, current_function_file) != 0)
8412 {
8413 if (inside_function && ! TARGET_GAS)
8414 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8415 else
8416 {
8417 ++num_source_filenames;
8418 current_function_file = name;
8419 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8420 }
8421
8422 output_quoted_string (stream, name);
8423 fprintf (stream, "\n");
8424 }
8425 }
8426 \f
8427 /* Structure to show the current status of registers and memory. */
8428
8429 struct shadow_summary
8430 {
8431 struct {
8432 unsigned int i : 31; /* Mask of int regs */
8433 unsigned int fp : 31; /* Mask of fp regs */
8434 unsigned int mem : 1; /* mem == imem | fpmem */
8435 } used, defd;
8436 };
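/* Only 31 bits are needed in each mask: register 31 ($31) and register 63
   ($f31) are the hard-wired zero registers, and summarize_insn below never
   records them.  */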
8437
8438 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8439 to the summary structure. SET is nonzero if the insn is setting the
8440 object, otherwise zero. */
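/* For example, summarizing (set (reg:DI 3) (plus:DI (reg:DI 4) (reg:DI 5)))
   sets bit 3 in SUM->defd.i and bits 4 and 5 in SUM->used.i.  */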
8441
8442 static void
8443 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8444 {
8445 const char *format_ptr;
8446 int i, j;
8447
8448 if (x == 0)
8449 return;
8450
8451 switch (GET_CODE (x))
8452 {
8453 /* ??? Note that this case would be incorrect if the Alpha had a
8454 ZERO_EXTRACT in SET_DEST. */
8455 case SET:
8456 summarize_insn (SET_SRC (x), sum, 0);
8457 summarize_insn (SET_DEST (x), sum, 1);
8458 break;
8459
8460 case CLOBBER:
8461 summarize_insn (XEXP (x, 0), sum, 1);
8462 break;
8463
8464 case USE:
8465 summarize_insn (XEXP (x, 0), sum, 0);
8466 break;
8467
8468 case ASM_OPERANDS:
8469 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8470 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8471 break;
8472
8473 case PARALLEL:
8474 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8475 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8476 break;
8477
8478 case SUBREG:
8479 summarize_insn (SUBREG_REG (x), sum, 0);
8480 break;
8481
8482 case REG:
8483 {
8484 int regno = REGNO (x);
8485 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8486
8487 if (regno == 31 || regno == 63)
8488 break;
8489
8490 if (set)
8491 {
8492 if (regno < 32)
8493 sum->defd.i |= mask;
8494 else
8495 sum->defd.fp |= mask;
8496 }
8497 else
8498 {
8499 if (regno < 32)
8500 sum->used.i |= mask;
8501 else
8502 sum->used.fp |= mask;
8503 }
8504 }
8505 break;
8506
8507 case MEM:
8508 if (set)
8509 sum->defd.mem = 1;
8510 else
8511 sum->used.mem = 1;
8512
8513 /* Find the regs used in memory address computation: */
8514 summarize_insn (XEXP (x, 0), sum, 0);
8515 break;
8516
8517 case CONST_INT: case CONST_DOUBLE:
8518 case SYMBOL_REF: case LABEL_REF: case CONST:
8519 case SCRATCH: case ASM_INPUT:
8520 break;
8521
8522 /* Handle common unary and binary ops for efficiency. */
8523 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8524 case MOD: case UDIV: case UMOD: case AND: case IOR:
8525 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8526 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8527 case NE: case EQ: case GE: case GT: case LE:
8528 case LT: case GEU: case GTU: case LEU: case LTU:
8529 summarize_insn (XEXP (x, 0), sum, 0);
8530 summarize_insn (XEXP (x, 1), sum, 0);
8531 break;
8532
8533 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8534 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8535 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8536 case SQRT: case FFS:
8537 summarize_insn (XEXP (x, 0), sum, 0);
8538 break;
8539
8540 default:
8541 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8542 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8543 switch (format_ptr[i])
8544 {
8545 case 'e':
8546 summarize_insn (XEXP (x, i), sum, 0);
8547 break;
8548
8549 case 'E':
8550 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8551 summarize_insn (XVECEXP (x, i, j), sum, 0);
8552 break;
8553
8554 case 'i':
8555 break;
8556
8557 default:
8558 gcc_unreachable ();
8559 }
8560 }
8561 }
8562
8563 /* Ensure a sufficient number of `trapb' insns are in the code when
8564 the user requests code with a trap precision of functions or
8565 instructions.
8566
8567 In naive mode, when the user requests a trap-precision of
8568 "instruction", a trapb is needed after every instruction that may
8569 generate a trap. This ensures that the code is resumption safe but
8570 it is also slow.
8571
8572 When optimizations are turned on, we delay issuing a trapb as long
8573 as possible. In this context, a trap shadow is the sequence of
8574 instructions that starts with a (potentially) trap generating
8575 instruction and extends to the next trapb or call_pal instruction
8576 (but GCC never generates call_pal by itself). We can delay (and
8577 therefore sometimes omit) a trapb subject to the following
8578 conditions:
8579
8580 (a) On entry to the trap shadow, if any Alpha register or memory
8581 location contains a value that is used as an operand value by some
8582 instruction in the trap shadow (live on entry), then no instruction
8583 in the trap shadow may modify the register or memory location.
8584
8585 (b) Within the trap shadow, the computation of the base register
8586 for a memory load or store instruction may not involve using the
8587 result of an instruction that might generate an UNPREDICTABLE
8588 result.
8589
8590 (c) Within the trap shadow, no register may be used more than once
8591 as a destination register. (This is to make life easier for the
8592 trap-handler.)
8593
8594 (d) The trap shadow may not include any branch instructions. */
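/* As a hypothetical example of (c): if a divt that may trap writes $f0 and a
   later instruction inside its shadow also writes $f0, the shadow must be
   closed by emitting a trapb before that second instruction.  */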
8595
8596 static void
8597 alpha_handle_trap_shadows (void)
8598 {
8599 struct shadow_summary shadow;
8600 int trap_pending, exception_nesting;
8601 rtx i, n;
8602
8603 trap_pending = 0;
8604 exception_nesting = 0;
8605 shadow.used.i = 0;
8606 shadow.used.fp = 0;
8607 shadow.used.mem = 0;
8608 shadow.defd = shadow.used;
8609
8610 for (i = get_insns (); i ; i = NEXT_INSN (i))
8611 {
8612 if (GET_CODE (i) == NOTE)
8613 {
8614 switch (NOTE_LINE_NUMBER (i))
8615 {
8616 case NOTE_INSN_EH_REGION_BEG:
8617 exception_nesting++;
8618 if (trap_pending)
8619 goto close_shadow;
8620 break;
8621
8622 case NOTE_INSN_EH_REGION_END:
8623 exception_nesting--;
8624 if (trap_pending)
8625 goto close_shadow;
8626 break;
8627
8628 case NOTE_INSN_EPILOGUE_BEG:
8629 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8630 goto close_shadow;
8631 break;
8632 }
8633 }
8634 else if (trap_pending)
8635 {
8636 if (alpha_tp == ALPHA_TP_FUNC)
8637 {
8638 if (GET_CODE (i) == JUMP_INSN
8639 && GET_CODE (PATTERN (i)) == RETURN)
8640 goto close_shadow;
8641 }
8642 else if (alpha_tp == ALPHA_TP_INSN)
8643 {
8644 if (optimize > 0)
8645 {
8646 struct shadow_summary sum;
8647
8648 sum.used.i = 0;
8649 sum.used.fp = 0;
8650 sum.used.mem = 0;
8651 sum.defd = sum.used;
8652
8653 switch (GET_CODE (i))
8654 {
8655 case INSN:
8656 /* Annoyingly, get_attr_trap will die on these. */
8657 if (GET_CODE (PATTERN (i)) == USE
8658 || GET_CODE (PATTERN (i)) == CLOBBER)
8659 break;
8660
8661 summarize_insn (PATTERN (i), &sum, 0);
8662
8663 if ((sum.defd.i & shadow.defd.i)
8664 || (sum.defd.fp & shadow.defd.fp))
8665 {
8666 /* (c) would be violated */
8667 goto close_shadow;
8668 }
8669
8670 /* Combine shadow with summary of current insn: */
8671 shadow.used.i |= sum.used.i;
8672 shadow.used.fp |= sum.used.fp;
8673 shadow.used.mem |= sum.used.mem;
8674 shadow.defd.i |= sum.defd.i;
8675 shadow.defd.fp |= sum.defd.fp;
8676 shadow.defd.mem |= sum.defd.mem;
8677
8678 if ((sum.defd.i & shadow.used.i)
8679 || (sum.defd.fp & shadow.used.fp)
8680 || (sum.defd.mem & shadow.used.mem))
8681 {
8682 /* (a) would be violated (also takes care of (b)) */
8683 gcc_assert (get_attr_trap (i) != TRAP_YES
8684 || (!(sum.defd.i & sum.used.i)
8685 && !(sum.defd.fp & sum.used.fp)));
8686
8687 goto close_shadow;
8688 }
8689 break;
8690
8691 case JUMP_INSN:
8692 case CALL_INSN:
8693 case CODE_LABEL:
8694 goto close_shadow;
8695
8696 default:
8697 gcc_unreachable ();
8698 }
8699 }
8700 else
8701 {
8702 close_shadow:
8703 n = emit_insn_before (gen_trapb (), i);
8704 PUT_MODE (n, TImode);
8705 PUT_MODE (i, TImode);
8706 trap_pending = 0;
8707 shadow.used.i = 0;
8708 shadow.used.fp = 0;
8709 shadow.used.mem = 0;
8710 shadow.defd = shadow.used;
8711 }
8712 }
8713 }
8714
8715 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8716 && GET_CODE (i) == INSN
8717 && GET_CODE (PATTERN (i)) != USE
8718 && GET_CODE (PATTERN (i)) != CLOBBER
8719 && get_attr_trap (i) == TRAP_YES)
8720 {
8721 if (optimize && !trap_pending)
8722 summarize_insn (PATTERN (i), &shadow, 0);
8723 trap_pending = 1;
8724 }
8725 }
8726 }
8727 \f
8728 /* The Alpha can only issue the instructions in a group simultaneously if
8729    the group is suitably aligned.  This is very processor-specific.  */
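/* The maximum useful alignment is 8 bytes (a two-instruction fetch block) on
   EV4 and 16 bytes (four instructions) on EV5; see the max_align arguments
   alpha_reorg passes to alpha_align_insns below.  */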
8730 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8731 that are marked "fake". These instructions do not exist on that target,
8732 but it is possible to see these insns with deranged combinations of
8733 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8734 choose a result at random. */
8735
8736 enum alphaev4_pipe {
8737 EV4_STOP = 0,
8738 EV4_IB0 = 1,
8739 EV4_IB1 = 2,
8740 EV4_IBX = 4
8741 };
8742
8743 enum alphaev5_pipe {
8744 EV5_STOP = 0,
8745 EV5_NONE = 1,
8746 EV5_E01 = 2,
8747 EV5_E0 = 4,
8748 EV5_E1 = 8,
8749 EV5_FAM = 16,
8750 EV5_FA = 32,
8751 EV5_FM = 64
8752 };
8753
8754 static enum alphaev4_pipe
8755 alphaev4_insn_pipe (rtx insn)
8756 {
8757 if (recog_memoized (insn) < 0)
8758 return EV4_STOP;
8759 if (get_attr_length (insn) != 4)
8760 return EV4_STOP;
8761
8762 switch (get_attr_type (insn))
8763 {
8764 case TYPE_ILD:
8765 case TYPE_LDSYM:
8766 case TYPE_FLD:
8767 case TYPE_LD_L:
8768 return EV4_IBX;
8769
8770 case TYPE_IADD:
8771 case TYPE_ILOG:
8772 case TYPE_ICMOV:
8773 case TYPE_ICMP:
8774 case TYPE_FST:
8775 case TYPE_SHIFT:
8776 case TYPE_IMUL:
8777 case TYPE_FBR:
8778 case TYPE_MVI: /* fake */
8779 return EV4_IB0;
8780
8781 case TYPE_IST:
8782 case TYPE_MISC:
8783 case TYPE_IBR:
8784 case TYPE_JSR:
8785 case TYPE_CALLPAL:
8786 case TYPE_FCPYS:
8787 case TYPE_FCMOV:
8788 case TYPE_FADD:
8789 case TYPE_FDIV:
8790 case TYPE_FMUL:
8791 case TYPE_ST_C:
8792 case TYPE_MB:
8793 case TYPE_FSQRT: /* fake */
8794 case TYPE_FTOI: /* fake */
8795 case TYPE_ITOF: /* fake */
8796 return EV4_IB1;
8797
8798 default:
8799 gcc_unreachable ();
8800 }
8801 }
8802
8803 static enum alphaev5_pipe
8804 alphaev5_insn_pipe (rtx insn)
8805 {
8806 if (recog_memoized (insn) < 0)
8807 return EV5_STOP;
8808 if (get_attr_length (insn) != 4)
8809 return EV5_STOP;
8810
8811 switch (get_attr_type (insn))
8812 {
8813 case TYPE_ILD:
8814 case TYPE_FLD:
8815 case TYPE_LDSYM:
8816 case TYPE_IADD:
8817 case TYPE_ILOG:
8818 case TYPE_ICMOV:
8819 case TYPE_ICMP:
8820 return EV5_E01;
8821
8822 case TYPE_IST:
8823 case TYPE_FST:
8824 case TYPE_SHIFT:
8825 case TYPE_IMUL:
8826 case TYPE_MISC:
8827 case TYPE_MVI:
8828 case TYPE_LD_L:
8829 case TYPE_ST_C:
8830 case TYPE_MB:
8831 case TYPE_FTOI: /* fake */
8832 case TYPE_ITOF: /* fake */
8833 return EV5_E0;
8834
8835 case TYPE_IBR:
8836 case TYPE_JSR:
8837 case TYPE_CALLPAL:
8838 return EV5_E1;
8839
8840 case TYPE_FCPYS:
8841 return EV5_FAM;
8842
8843 case TYPE_FBR:
8844 case TYPE_FCMOV:
8845 case TYPE_FADD:
8846 case TYPE_FDIV:
8847 case TYPE_FSQRT: /* fake */
8848 return EV5_FA;
8849
8850 case TYPE_FMUL:
8851 return EV5_FM;
8852
8853 default:
8854 gcc_unreachable ();
8855 }
8856 }
8857
8858 /* IN_USE is a mask of the slots currently filled within the insn group.
8859 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8860 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8861
8862 LEN is, of course, the length of the group in bytes. */
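/* As a rough illustration: starting a group with a load (EV4_IBX) and
   following it with a store (EV4_IB1) fills both slots of an 8-byte group,
   since the hardware can swap the load into IB0; a third ALU insn (EV4_IB0)
   would then have to start the next group.  */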
8863
8864 static rtx
8865 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8866 {
8867 int len, in_use;
8868
8869 len = in_use = 0;
8870
8871 if (! INSN_P (insn)
8872 || GET_CODE (PATTERN (insn)) == CLOBBER
8873 || GET_CODE (PATTERN (insn)) == USE)
8874 goto next_and_done;
8875
8876 while (1)
8877 {
8878 enum alphaev4_pipe pipe;
8879
8880 pipe = alphaev4_insn_pipe (insn);
8881 switch (pipe)
8882 {
8883 case EV4_STOP:
8884 /* Force complex instructions to start new groups. */
8885 if (in_use)
8886 goto done;
8887
8888 /* If this is a completely unrecognized insn, it's an asm.
8889 We don't know how long it is, so record length as -1 to
8890 signal a needed realignment. */
8891 if (recog_memoized (insn) < 0)
8892 len = -1;
8893 else
8894 len = get_attr_length (insn);
8895 goto next_and_done;
8896
8897 case EV4_IBX:
8898 if (in_use & EV4_IB0)
8899 {
8900 if (in_use & EV4_IB1)
8901 goto done;
8902 in_use |= EV4_IB1;
8903 }
8904 else
8905 in_use |= EV4_IB0 | EV4_IBX;
8906 break;
8907
8908 case EV4_IB0:
8909 if (in_use & EV4_IB0)
8910 {
8911 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8912 goto done;
8913 in_use |= EV4_IB1;
8914 }
8915 in_use |= EV4_IB0;
8916 break;
8917
8918 case EV4_IB1:
8919 if (in_use & EV4_IB1)
8920 goto done;
8921 in_use |= EV4_IB1;
8922 break;
8923
8924 default:
8925 gcc_unreachable ();
8926 }
8927 len += 4;
8928
8929 /* Haifa doesn't do well scheduling branches. */
8930 if (GET_CODE (insn) == JUMP_INSN)
8931 goto next_and_done;
8932
8933 next:
8934 insn = next_nonnote_insn (insn);
8935
8936 if (!insn || ! INSN_P (insn))
8937 goto done;
8938
8939 /* Let Haifa tell us where it thinks insn group boundaries are. */
8940 if (GET_MODE (insn) == TImode)
8941 goto done;
8942
8943 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8944 goto next;
8945 }
8946
8947 next_and_done:
8948 insn = next_nonnote_insn (insn);
8949
8950 done:
8951 *plen = len;
8952 *pin_use = in_use;
8953 return insn;
8954 }
8955
8956 /* IN_USE is a mask of the slots currently filled within the insn group.
8957 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8958 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8959
8960 LEN is, of course, the length of the group in bytes. */
8961
8962 static rtx
8963 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8964 {
8965 int len, in_use;
8966
8967 len = in_use = 0;
8968
8969 if (! INSN_P (insn)
8970 || GET_CODE (PATTERN (insn)) == CLOBBER
8971 || GET_CODE (PATTERN (insn)) == USE)
8972 goto next_and_done;
8973
8974 while (1)
8975 {
8976 enum alphaev5_pipe pipe;
8977
8978 pipe = alphaev5_insn_pipe (insn);
8979 switch (pipe)
8980 {
8981 case EV5_STOP:
8982 /* Force complex instructions to start new groups. */
8983 if (in_use)
8984 goto done;
8985
8986 /* If this is a completely unrecognized insn, it's an asm.
8987 We don't know how long it is, so record length as -1 to
8988 signal a needed realignment. */
8989 if (recog_memoized (insn) < 0)
8990 len = -1;
8991 else
8992 len = get_attr_length (insn);
8993 goto next_and_done;
8994
8995 	  /* ??? In most of the places below, we would like to assert that these
8996 	     cases never happen, as that would indicate an error either in Haifa
8997 	     or in the scheduling description.  Unfortunately, Haifa never
8998 	     schedules the last instruction of the BB, so we don't have
8999 	     an accurate TI bit to go off of.  */
9000 case EV5_E01:
9001 if (in_use & EV5_E0)
9002 {
9003 if (in_use & EV5_E1)
9004 goto done;
9005 in_use |= EV5_E1;
9006 }
9007 else
9008 in_use |= EV5_E0 | EV5_E01;
9009 break;
9010
9011 case EV5_E0:
9012 if (in_use & EV5_E0)
9013 {
9014 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9015 goto done;
9016 in_use |= EV5_E1;
9017 }
9018 in_use |= EV5_E0;
9019 break;
9020
9021 case EV5_E1:
9022 if (in_use & EV5_E1)
9023 goto done;
9024 in_use |= EV5_E1;
9025 break;
9026
9027 case EV5_FAM:
9028 if (in_use & EV5_FA)
9029 {
9030 if (in_use & EV5_FM)
9031 goto done;
9032 in_use |= EV5_FM;
9033 }
9034 else
9035 in_use |= EV5_FA | EV5_FAM;
9036 break;
9037
9038 case EV5_FA:
9039 if (in_use & EV5_FA)
9040 goto done;
9041 in_use |= EV5_FA;
9042 break;
9043
9044 case EV5_FM:
9045 if (in_use & EV5_FM)
9046 goto done;
9047 in_use |= EV5_FM;
9048 break;
9049
9050 case EV5_NONE:
9051 break;
9052
9053 default:
9054 gcc_unreachable ();
9055 }
9056 len += 4;
9057
9058 /* Haifa doesn't do well scheduling branches. */
9059 /* ??? If this is predicted not-taken, slotting continues, except
9060 that no more IBR, FBR, or JSR insns may be slotted. */
9061 if (GET_CODE (insn) == JUMP_INSN)
9062 goto next_and_done;
9063
9064 next:
9065 insn = next_nonnote_insn (insn);
9066
9067 if (!insn || ! INSN_P (insn))
9068 goto done;
9069
9070 /* Let Haifa tell us where it thinks insn group boundaries are. */
9071 if (GET_MODE (insn) == TImode)
9072 goto done;
9073
9074 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9075 goto next;
9076 }
9077
9078 next_and_done:
9079 insn = next_nonnote_insn (insn);
9080
9081 done:
9082 *plen = len;
9083 *pin_use = in_use;
9084 return insn;
9085 }
9086
9087 static rtx
9088 alphaev4_next_nop (int *pin_use)
9089 {
9090 int in_use = *pin_use;
9091 rtx nop;
9092
9093 if (!(in_use & EV4_IB0))
9094 {
9095 in_use |= EV4_IB0;
9096 nop = gen_nop ();
9097 }
9098 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9099 {
9100 in_use |= EV4_IB1;
9101 nop = gen_nop ();
9102 }
9103 else if (TARGET_FP && !(in_use & EV4_IB1))
9104 {
9105 in_use |= EV4_IB1;
9106 nop = gen_fnop ();
9107 }
9108 else
9109 nop = gen_unop ();
9110
9111 *pin_use = in_use;
9112 return nop;
9113 }
9114
9115 static rtx
9116 alphaev5_next_nop (int *pin_use)
9117 {
9118 int in_use = *pin_use;
9119 rtx nop;
9120
9121 if (!(in_use & EV5_E1))
9122 {
9123 in_use |= EV5_E1;
9124 nop = gen_nop ();
9125 }
9126 else if (TARGET_FP && !(in_use & EV5_FA))
9127 {
9128 in_use |= EV5_FA;
9129 nop = gen_fnop ();
9130 }
9131 else if (TARGET_FP && !(in_use & EV5_FM))
9132 {
9133 in_use |= EV5_FM;
9134 nop = gen_fnop ();
9135 }
9136 else
9137 nop = gen_unop ();
9138
9139 *pin_use = in_use;
9140 return nop;
9141 }
9142
9143 /* The instruction group alignment main loop. */
9144
9145 static void
9146 alpha_align_insns (unsigned int max_align,
9147 rtx (*next_group) (rtx, int *, int *),
9148 rtx (*next_nop) (int *))
9149 {
9150 /* ALIGN is the known alignment for the insn group. */
9151 unsigned int align;
9152 /* OFS is the offset of the current insn in the insn group. */
9153 int ofs;
9154 int prev_in_use, in_use, len, ldgp;
9155 rtx i, next;
9156
9157   /* Let shorten_branches take care of assigning alignments to code labels.  */
9158 shorten_branches (get_insns ());
9159
9160 if (align_functions < 4)
9161 align = 4;
9162 else if ((unsigned int) align_functions < max_align)
9163 align = align_functions;
9164 else
9165 align = max_align;
9166
9167 ofs = prev_in_use = 0;
9168 i = get_insns ();
9169 if (GET_CODE (i) == NOTE)
9170 i = next_nonnote_insn (i);
9171
9172 ldgp = alpha_function_needs_gp ? 8 : 0;
9173
9174 while (i)
9175 {
9176 next = (*next_group) (i, &in_use, &len);
9177
9178 /* When we see a label, resync alignment etc. */
9179 if (GET_CODE (i) == CODE_LABEL)
9180 {
9181 unsigned int new_align = 1 << label_to_alignment (i);
9182
9183 if (new_align >= align)
9184 {
9185 align = new_align < max_align ? new_align : max_align;
9186 ofs = 0;
9187 }
9188
9189 else if (ofs & (new_align-1))
9190 ofs = (ofs | (new_align-1)) + 1;
9191 gcc_assert (!len);
9192 }
9193
9194       /* Handle complex instructions specially.  */
9195 else if (in_use == 0)
9196 {
9197 /* Asms will have length < 0. This is a signal that we have
9198 lost alignment knowledge. Assume, however, that the asm
9199 will not mis-align instructions. */
9200 if (len < 0)
9201 {
9202 ofs = 0;
9203 align = 4;
9204 len = 0;
9205 }
9206 }
9207
9208 /* If the known alignment is smaller than the recognized insn group,
9209 realign the output. */
9210 else if ((int) align < len)
9211 {
9212 unsigned int new_log_align = len > 8 ? 4 : 3;
9213 rtx prev, where;
9214
9215 where = prev = prev_nonnote_insn (i);
9216 if (!where || GET_CODE (where) != CODE_LABEL)
9217 where = i;
9218
9219 /* Can't realign between a call and its gp reload. */
9220 if (! (TARGET_EXPLICIT_RELOCS
9221 && prev && GET_CODE (prev) == CALL_INSN))
9222 {
9223 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9224 align = 1 << new_log_align;
9225 ofs = 0;
9226 }
9227 }
9228
9229 /* We may not insert padding inside the initial ldgp sequence. */
9230 else if (ldgp > 0)
9231 ldgp -= len;
9232
9233 /* If the group won't fit in the same INT16 as the previous,
9234 we need to add padding to keep the group together. Rather
9235 than simply leaving the insn filling to the assembler, we
9236 can make use of the knowledge of what sorts of instructions
9237 were issued in the previous group to make sure that all of
9238 the added nops are really free. */
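      /* For instance (hypothetical numbers): with align == 16 and ofs == 12,
	 an 8-byte group would straddle the boundary, so (16 - 12) / 4 == 1
	 nop is inserted and the group starts at the next aligned address.  */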
9239 else if (ofs + len > (int) align)
9240 {
9241 int nop_count = (align - ofs) / 4;
9242 rtx where;
9243
9244 /* Insert nops before labels, branches, and calls to truly merge
9245 the execution of the nops with the previous instruction group. */
9246 where = prev_nonnote_insn (i);
9247 if (where)
9248 {
9249 if (GET_CODE (where) == CODE_LABEL)
9250 {
9251 rtx where2 = prev_nonnote_insn (where);
9252 if (where2 && GET_CODE (where2) == JUMP_INSN)
9253 where = where2;
9254 }
9255 else if (GET_CODE (where) == INSN)
9256 where = i;
9257 }
9258 else
9259 where = i;
9260
9261 do
9262 emit_insn_before ((*next_nop)(&prev_in_use), where);
9263 while (--nop_count);
9264 ofs = 0;
9265 }
9266
9267 ofs = (ofs + len) & (align - 1);
9268 prev_in_use = in_use;
9269 i = next;
9270 }
9271 }
9272 \f
9273 /* Machine dependent reorg pass. */
9274
9275 static void
9276 alpha_reorg (void)
9277 {
9278 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9279 alpha_handle_trap_shadows ();
9280
9281 /* Due to the number of extra trapb insns, don't bother fixing up
9282 alignment when trap precision is instruction. Moreover, we can
9283 only do our job when sched2 is run. */
9284 if (optimize && !optimize_size
9285 && alpha_tp != ALPHA_TP_INSN
9286 && flag_schedule_insns_after_reload)
9287 {
9288 if (alpha_tune == PROCESSOR_EV4)
9289 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9290 else if (alpha_tune == PROCESSOR_EV5)
9291 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9292 }
9293 }
9294 \f
9295 #if !TARGET_ABI_UNICOSMK
9296
9297 #ifdef HAVE_STAMP_H
9298 #include <stamp.h>
9299 #endif
9300
9301 static void
9302 alpha_file_start (void)
9303 {
9304 #ifdef OBJECT_FORMAT_ELF
9305 /* If emitting dwarf2 debug information, we cannot generate a .file
9306 directive to start the file, as it will conflict with dwarf2out
9307 file numbers. So it's only useful when emitting mdebug output. */
9308 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9309 #endif
9310
9311 default_file_start ();
9312 #ifdef MS_STAMP
9313 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9314 #endif
9315
9316 fputs ("\t.set noreorder\n", asm_out_file);
9317 fputs ("\t.set volatile\n", asm_out_file);
9318 if (!TARGET_ABI_OPEN_VMS)
9319 fputs ("\t.set noat\n", asm_out_file);
9320 if (TARGET_EXPLICIT_RELOCS)
9321 fputs ("\t.set nomacro\n", asm_out_file);
9322 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9323 {
9324 const char *arch;
9325
9326 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9327 arch = "ev6";
9328 else if (TARGET_MAX)
9329 arch = "pca56";
9330 else if (TARGET_BWX)
9331 arch = "ev56";
9332 else if (alpha_cpu == PROCESSOR_EV5)
9333 arch = "ev5";
9334 else
9335 arch = "ev4";
9336
9337 fprintf (asm_out_file, "\t.arch %s\n", arch);
9338 }
9339 }
9340 #endif
9341
9342 #ifdef OBJECT_FORMAT_ELF
9343 /* Since we don't have a .dynbss section, we should not allow global
9344 relocations in the .rodata section. */
9345
9346 static int
9347 alpha_elf_reloc_rw_mask (void)
9348 {
9349 return flag_pic ? 3 : 2;
9350 }
9351
9352 /* Return a section for X. The only special thing we do here is to
9353 honor small data. */
9354
9355 static section *
9356 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9357 unsigned HOST_WIDE_INT align)
9358 {
9359 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9360 /* ??? Consider using mergeable sdata sections. */
9361 return sdata_section;
9362 else
9363 return default_elf_select_rtx_section (mode, x, align);
9364 }
9365
9366 static unsigned int
9367 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9368 {
9369 unsigned int flags = 0;
9370
9371 if (strcmp (name, ".sdata") == 0
9372 || strncmp (name, ".sdata.", 7) == 0
9373 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9374 || strcmp (name, ".sbss") == 0
9375 || strncmp (name, ".sbss.", 6) == 0
9376 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9377 flags = SECTION_SMALL;
9378
9379 flags |= default_section_type_flags (decl, name, reloc);
9380 return flags;
9381 }
9382 #endif /* OBJECT_FORMAT_ELF */
9383 \f
9384 /* Structure to collect function names for final output in link section. */
9385 /* Note that items marked with GTY can't be ifdef'ed out. */
9386
9387 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9388 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9389
9390 struct alpha_links GTY(())
9391 {
9392 int num;
9393 rtx linkage;
9394 enum links_kind lkind;
9395 enum reloc_kind rkind;
9396 };
9397
9398 struct alpha_funcs GTY(())
9399 {
9400 int num;
9401 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9402 links;
9403 };
9404
9405 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9406 splay_tree alpha_links_tree;
9407 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9408 splay_tree alpha_funcs_tree;
9409
9410 static GTY(()) int alpha_funcs_num;
9411
9412 #if TARGET_ABI_OPEN_VMS
9413
9414 /* Return the VMS argument type corresponding to MODE. */
9415
9416 enum avms_arg_type
9417 alpha_arg_type (enum machine_mode mode)
9418 {
9419 switch (mode)
9420 {
9421 case SFmode:
9422 return TARGET_FLOAT_VAX ? FF : FS;
9423 case DFmode:
9424 return TARGET_FLOAT_VAX ? FD : FT;
9425 default:
9426 return I64;
9427 }
9428 }
9429
9430 /* Return an rtx for an integer representing the VMS Argument Information
9431 register value. */
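/* Judging from the code below: the low bits hold the argument count, and each
   of the first six arguments gets a 3-bit avms_arg_type field starting at bit
   8 (bits 8-10 for the first argument, 11-13 for the second, and so on).  */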
9432
9433 rtx
9434 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9435 {
9436 unsigned HOST_WIDE_INT regval = cum.num_args;
9437 int i;
9438
9439 for (i = 0; i < 6; i++)
9440 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9441
9442 return GEN_INT (regval);
9443 }
9444 \f
9445 /* Make (or fake) a .linkage entry for a function call.
9446
9447 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9448
9449    Return a SYMBOL_REF rtx for the linkage.  */
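/* For example, for a function named "foo" the linkage symbol constructed
   below is "$foo..lk".  */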
9450
9451 rtx
9452 alpha_need_linkage (const char *name, int is_local)
9453 {
9454 splay_tree_node node;
9455 struct alpha_links *al;
9456
9457 if (name[0] == '*')
9458 name++;
9459
9460 if (is_local)
9461 {
9462 struct alpha_funcs *cfaf;
9463
9464 if (!alpha_funcs_tree)
9465 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9466 splay_tree_compare_pointers);
9467
9468 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9469
9470 cfaf->links = 0;
9471 cfaf->num = ++alpha_funcs_num;
9472
9473 splay_tree_insert (alpha_funcs_tree,
9474 (splay_tree_key) current_function_decl,
9475 (splay_tree_value) cfaf);
9476 }
9477
9478 if (alpha_links_tree)
9479 {
9480 /* Is this name already defined? */
9481
9482 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9483 if (node)
9484 {
9485 al = (struct alpha_links *) node->value;
9486 if (is_local)
9487 {
9488 /* Defined here but external assumed. */
9489 if (al->lkind == KIND_EXTERN)
9490 al->lkind = KIND_LOCAL;
9491 }
9492 else
9493 {
9494 /* Used here but unused assumed. */
9495 if (al->lkind == KIND_UNUSED)
9496 al->lkind = KIND_LOCAL;
9497 }
9498 return al->linkage;
9499 }
9500 }
9501 else
9502 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9503
9504 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9505 name = ggc_strdup (name);
9506
9507 /* Assume external if no definition. */
9508 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9509
9510 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9511 get_identifier (name);
9512
9513 /* Construct a SYMBOL_REF for us to call. */
9514 {
9515 size_t name_len = strlen (name);
9516 char *linksym = alloca (name_len + 6);
9517 linksym[0] = '$';
9518 memcpy (linksym + 1, name, name_len);
9519 memcpy (linksym + 1 + name_len, "..lk", 5);
9520 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9521 ggc_alloc_string (linksym, name_len + 5));
9522 }
9523
9524 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9525 (splay_tree_value) al);
9526
9527 return al->linkage;
9528 }
9529
9530 rtx
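/* Record a use of the VMS linkage entry LINKAGE by the function CFUNDECL and
   return an rtx with which to reference it.  The per-function entry is named
   "$<function-number>..<name>..lk".  A nonzero RFLAG marks the entry as a
   code address rather than a linkage pair; a nonzero LFLAG apparently asks
   for the second quadword of the pair, i.e. a MEM at linkage + 8.  */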
9531 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9532 {
9533 splay_tree_node cfunnode;
9534 struct alpha_funcs *cfaf;
9535 struct alpha_links *al;
9536 const char *name = XSTR (linkage, 0);
9537
9538 cfaf = (struct alpha_funcs *) 0;
9539 al = (struct alpha_links *) 0;
9540
9541 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9542 cfaf = (struct alpha_funcs *) cfunnode->value;
9543
9544 if (cfaf->links)
9545 {
9546 splay_tree_node lnode;
9547
9548 /* Is this name already defined? */
9549
9550 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9551 if (lnode)
9552 al = (struct alpha_links *) lnode->value;
9553 }
9554 else
9555 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9556
9557 if (!al)
9558 {
9559 size_t name_len;
9560 size_t buflen;
9561 char buf [512];
9562 char *linksym;
9563 splay_tree_node node = 0;
9564 struct alpha_links *anl;
9565
9566 if (name[0] == '*')
9567 name++;
9568
9569 name_len = strlen (name);
9570
9571 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9572 al->num = cfaf->num;
9573
9574 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9575 if (node)
9576 {
9577 anl = (struct alpha_links *) node->value;
9578 al->lkind = anl->lkind;
9579 }
9580
9581 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9582 buflen = strlen (buf);
9583 linksym = alloca (buflen + 1);
9584 memcpy (linksym, buf, buflen + 1);
9585
9586 al->linkage = gen_rtx_SYMBOL_REF
9587 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9588
9589 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9590 (splay_tree_value) al);
9591 }
9592
9593 if (rflag)
9594 al->rkind = KIND_CODEADDR;
9595 else
9596 al->rkind = KIND_LINKAGE;
9597
9598 if (lflag)
9599 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9600 else
9601 return al->linkage;
9602 }
9603
9604 static int
9605 alpha_write_one_linkage (splay_tree_node node, void *data)
9606 {
9607 const char *const name = (const char *) node->key;
9608 struct alpha_links *link = (struct alpha_links *) node->value;
9609 FILE *stream = (FILE *) data;
9610
9611 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9612 if (link->rkind == KIND_CODEADDR)
9613 {
9614 if (link->lkind == KIND_LOCAL)
9615 {
9616 /* Local and used */
9617 fprintf (stream, "\t.quad %s..en\n", name);
9618 }
9619 else
9620 {
9621 /* External and used, request code address. */
9622 fprintf (stream, "\t.code_address %s\n", name);
9623 }
9624 }
9625 else
9626 {
9627 if (link->lkind == KIND_LOCAL)
9628 {
9629 /* Local and used, build linkage pair. */
9630 fprintf (stream, "\t.quad %s..en\n", name);
9631 fprintf (stream, "\t.quad %s\n", name);
9632 }
9633 else
9634 {
9635 /* External and used, request linkage pair. */
9636 fprintf (stream, "\t.linkage %s\n", name);
9637 }
9638 }
9639
9640 return 0;
9641 }
9642
9643 static void
9644 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9645 {
9646 splay_tree_node node;
9647 struct alpha_funcs *func;
9648
9649 fprintf (stream, "\t.link\n");
9650 fprintf (stream, "\t.align 3\n");
9651 in_section = NULL;
9652
9653 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9654 func = (struct alpha_funcs *) node->value;
9655
9656 fputs ("\t.name ", stream);
9657 assemble_name (stream, funname);
9658 fputs ("..na\n", stream);
9659 ASM_OUTPUT_LABEL (stream, funname);
9660 fprintf (stream, "\t.pdesc ");
9661 assemble_name (stream, funname);
9662 fprintf (stream, "..en,%s\n",
9663 alpha_procedure_type == PT_STACK ? "stack"
9664 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9665
9666 if (func->links)
9667 {
9668 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9669 /* splay_tree_delete (func->links); */
9670 }
9671 }
9672
9673 /* Given a decl, a section name, and whether the decl initializer
9674 has relocs, choose attributes for the section. */
9675
9676 #define SECTION_VMS_OVERLAY SECTION_FORGET
9677 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9678 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9679
9680 static unsigned int
9681 vms_section_type_flags (tree decl, const char *name, int reloc)
9682 {
9683 unsigned int flags = default_section_type_flags (decl, name, reloc);
9684
9685 if (decl && DECL_ATTRIBUTES (decl)
9686 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9687 flags |= SECTION_VMS_OVERLAY;
9688 if (decl && DECL_ATTRIBUTES (decl)
9689 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9690 flags |= SECTION_VMS_GLOBAL;
9691 if (decl && DECL_ATTRIBUTES (decl)
9692 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9693 flags |= SECTION_VMS_INITIALIZE;
9694
9695 return flags;
9696 }
9697
9698 /* Switch to an arbitrary section NAME with attributes as specified
9699 by FLAGS. ALIGN specifies any known alignment requirements for
9700 the section; 0 if the default should be used. */
9701
9702 static void
9703 vms_asm_named_section (const char *name, unsigned int flags,
9704 tree decl ATTRIBUTE_UNUSED)
9705 {
9706 fputc ('\n', asm_out_file);
9707 fprintf (asm_out_file, ".section\t%s", name);
9708
9709 if (flags & SECTION_VMS_OVERLAY)
9710 fprintf (asm_out_file, ",OVR");
9711 if (flags & SECTION_VMS_GLOBAL)
9712 fprintf (asm_out_file, ",GBL");
9713 if (flags & SECTION_VMS_INITIALIZE)
9714 fprintf (asm_out_file, ",NOMOD");
9715 if (flags & SECTION_DEBUG)
9716 fprintf (asm_out_file, ",NOWRT");
9717
9718 fputc ('\n', asm_out_file);
9719 }
9720
9721 /* Record an element in the table of global constructors. SYMBOL is
9722 a SYMBOL_REF of the function to be called; PRIORITY is a number
9723 between 0 and MAX_INIT_PRIORITY.
9724
9725 Differs from default_ctors_section_asm_out_constructor in that the
9726 width of the .ctors entry is always 64 bits, rather than the 32 bits
9727 used by a normal pointer. */
9728
9729 static void
9730 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9731 {
9732 switch_to_section (ctors_section);
9733 assemble_align (BITS_PER_WORD);
9734 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9735 }
9736
9737 static void
9738 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9739 {
9740 switch_to_section (dtors_section);
9741 assemble_align (BITS_PER_WORD);
9742 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9743 }
9744 #else
9745
9746 rtx
9747 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9748 int is_local ATTRIBUTE_UNUSED)
9749 {
9750 return NULL_RTX;
9751 }
9752
9753 rtx
9754 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9755 tree cfundecl ATTRIBUTE_UNUSED,
9756 int lflag ATTRIBUTE_UNUSED,
9757 int rflag ATTRIBUTE_UNUSED)
9758 {
9759 return NULL_RTX;
9760 }
9761
9762 #endif /* TARGET_ABI_OPEN_VMS */
9763 \f
9764 #if TARGET_ABI_UNICOSMK
9765
9766 /* This evaluates to true if we do not know how to pass TYPE solely in
9767 registers. This is the case for all arguments that do not fit in two
9768 registers. */
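/* For instance, a 24-byte structure presumably occupies three argument words
   (ALPHA_ARG_SIZE == 3) and would therefore be passed on the stack.  */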
9769
9770 static bool
9771 unicosmk_must_pass_in_stack (enum machine_mode mode, tree type)
9772 {
9773 if (type == NULL)
9774 return false;
9775
9776 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9777 return true;
9778 if (TREE_ADDRESSABLE (type))
9779 return true;
9780
9781 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9782 }
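
/* Rough examples (illustrative): with 64-bit argument registers, a 16-byte
   struct still fits in two registers and is not forced onto the stack,
   whereas a 24-byte struct has ALPHA_ARG_SIZE > 2 and is passed in memory,
   as is any variable-sized or TREE_ADDRESSABLE type.  */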
9783
9784 /* Define the offset between two registers, one to be eliminated, and the
9785 other its replacement, at the start of a routine. */
9786
9787 int
9788 unicosmk_initial_elimination_offset (int from, int to)
9789 {
9790 int fixed_size;
9791
9792 fixed_size = alpha_sa_size();
9793 if (fixed_size != 0)
9794 fixed_size += 48;
9795
9796 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9797 return -fixed_size;
9798 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9799 return 0;
9800 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9801 return (ALPHA_ROUND (current_function_outgoing_args_size)
9802 + ALPHA_ROUND (get_frame_size()));
9803 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9804 return (ALPHA_ROUND (fixed_size)
9805 + ALPHA_ROUND (get_frame_size()
9806 + current_function_outgoing_args_size));
9807 else
9808 gcc_unreachable ();
9809 }
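
/* Worked example (illustrative, made-up sizes): with 48 bytes of locals and
   16 bytes of outgoing arguments, eliminating the soft frame pointer into
   the stack pointer yields ALPHA_ROUND (16) + ALPHA_ROUND (48) = 64 bytes,
   assuming ALPHA_ROUND rounds up to the 16-byte stack alignment; the
   argument pointer to stack pointer elimination additionally adds
   ALPHA_ROUND (fixed_size), the register-save area computed above.  */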
9810
9811 /* Output the module name for .ident and .end directives. We have to strip
9812    directories and make sure that the module name starts with a letter
9813 or '$'. */
9814
9815 static void
9816 unicosmk_output_module_name (FILE *file)
9817 {
9818 const char *name = lbasename (main_input_filename);
9819 unsigned len = strlen (name);
9820 char *clean_name = alloca (len + 2);
9821 char *ptr = clean_name;
9822
9823 /* CAM only accepts module names that start with a letter or '$'. We
9824 prefix the module name with a '$' if necessary. */
9825
9826 if (!ISALPHA (*name))
9827 *ptr++ = '$';
9828 memcpy (ptr, name, len + 1);
9829 clean_symbol_name (clean_name);
9830 fputs (clean_name, file);
9831 }
9832
9833 /* Output the definition of a common variable. */
9834
9835 void
9836 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9837 {
9838 tree name_tree;
9839 printf ("T3E__: common %s\n", name);
9840
9841 in_section = NULL;
9842 fputs("\t.endp\n\n\t.psect ", file);
9843 assemble_name(file, name);
9844 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9845 fprintf(file, "\t.byte\t0:%d\n", size);
9846
9847 /* Mark the symbol as defined in this module. */
9848 name_tree = get_identifier (name);
9849 TREE_ASM_WRITTEN (name_tree) = 1;
9850 }
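
/* For instance (illustrative, hypothetical name): an 8-byte common variable
   "counter" with 64-bit alignment comes out roughly as

       .endp

       .psect counter,3,common
       .byte	0:8

   since floor_log2 (64 / BITS_PER_UNIT) == 3.  */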
9851
9852 #define SECTION_PUBLIC SECTION_MACH_DEP
9853 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9854 static int current_section_align;
9855
9856 /* A get_unnamed_section callback for switching to the text section. */
9857
9858 static void
9859 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9860 {
9861 static int count = 0;
9862 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9863 }
9864
9865 /* A get_unnamed_section callback for switching to the data section. */
9866
9867 static void
9868 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9869 {
9870 static int count = 1;
9871 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9872 }
9873
9874 /* Implement TARGET_ASM_INIT_SECTIONS.
9875
9876 The Cray assembler is really weird with respect to sections. It has only
9877 named sections and you can't reopen a section once it has been closed.
9878 This means that we have to generate unique names whenever we want to
9879 reenter the text or the data section. */
9880
9881 static void
9882 unicosmk_init_sections (void)
9883 {
9884 text_section = get_unnamed_section (SECTION_CODE,
9885 unicosmk_output_text_section_asm_op,
9886 NULL);
9887 data_section = get_unnamed_section (SECTION_WRITE,
9888 unicosmk_output_data_section_asm_op,
9889 NULL);
9890 readonly_data_section = data_section;
9891 }
9892
9893 static unsigned int
9894 unicosmk_section_type_flags (tree decl, const char *name,
9895 int reloc ATTRIBUTE_UNUSED)
9896 {
9897 unsigned int flags = default_section_type_flags (decl, name, reloc);
9898
9899 if (!decl)
9900 return flags;
9901
9902 if (TREE_CODE (decl) == FUNCTION_DECL)
9903 {
9904 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9905 if (align_functions_log > current_section_align)
9906 current_section_align = align_functions_log;
9907
9908 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9909 flags |= SECTION_MAIN;
9910 }
9911 else
9912 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9913
9914 if (TREE_PUBLIC (decl))
9915 flags |= SECTION_PUBLIC;
9916
9917 return flags;
9918 }
9919
9920 /* Generate a section name for decl and associate it with the
9921 declaration. */
9922
9923 static void
9924 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9925 {
9926 const char *name;
9927 int len;
9928
9929 gcc_assert (decl);
9930
9931 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9932 name = default_strip_name_encoding (name);
9933 len = strlen (name);
9934
9935 if (TREE_CODE (decl) == FUNCTION_DECL)
9936 {
9937 char *string;
9938
9939 /* It is essential that we prefix the section name here because
9940 otherwise the section names generated for constructors and
9941 destructors confuse collect2. */
9942
9943 string = alloca (len + 6);
9944 sprintf (string, "code@%s", name);
9945 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9946 }
9947 else if (TREE_PUBLIC (decl))
9948 DECL_SECTION_NAME (decl) = build_string (len, name);
9949 else
9950 {
9951 char *string;
9952
9953 string = alloca (len + 6);
9954 sprintf (string, "data@%s", name);
9955 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9956 }
9957 }
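
/* Illustrative examples (hypothetical identifiers): a function "foo" ends up
   in a section named "code@foo", a file-local variable "bar" in "data@bar",
   while a public variable keeps its own name as the section name.  The
   "code@"/"data@" prefixes keep collect2 from misinterpreting these section
   names.  */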
9958
9959 /* Switch to an arbitrary section NAME with attributes as specified
9960 by FLAGS. ALIGN specifies any known alignment requirements for
9961 the section; 0 if the default should be used. */
9962
9963 static void
9964 unicosmk_asm_named_section (const char *name, unsigned int flags,
9965 tree decl ATTRIBUTE_UNUSED)
9966 {
9967 const char *kind;
9968
9969 /* Close the previous section. */
9970
9971 fputs ("\t.endp\n\n", asm_out_file);
9972
9973 /* Find out what kind of section we are opening. */
9974
9975 if (flags & SECTION_MAIN)
9976 fputs ("\t.start\tmain\n", asm_out_file);
9977
9978 if (flags & SECTION_CODE)
9979 kind = "code";
9980 else if (flags & SECTION_PUBLIC)
9981 kind = "common";
9982 else
9983 kind = "data";
9984
9985 if (current_section_align != 0)
9986 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9987 current_section_align, kind);
9988 else
9989 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9990 }
9991
9992 static void
9993 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9994 {
9995 if (DECL_P (decl)
9996 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9997 unicosmk_unique_section (decl, 0);
9998 }
9999
10000 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10001    in code sections because .align fills unused space with zeroes.  */
10002
10003 void
10004 unicosmk_output_align (FILE *file, int align)
10005 {
10006 if (inside_function)
10007 fprintf (file, "\tgcc@code@align\t%d\n", align);
10008 else
10009 fprintf (file, "\t.align\t%d\n", align);
10010 }
10011
10012 /* Add a case vector to the current function's list of deferred case
10013 vectors. Case vectors have to be put into a separate section because CAM
10014 does not allow data definitions in code sections. */
10015
10016 void
10017 unicosmk_defer_case_vector (rtx lab, rtx vec)
10018 {
10019 struct machine_function *machine = cfun->machine;
10020
10021 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10022 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10023 machine->addr_list);
10024 }
10025
10026 /* Output a case vector. */
10027
10028 static void
10029 unicosmk_output_addr_vec (FILE *file, rtx vec)
10030 {
10031 rtx lab = XEXP (vec, 0);
10032 rtx body = XEXP (vec, 1);
10033 int vlen = XVECLEN (body, 0);
10034 int idx;
10035
10036 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10037
10038 for (idx = 0; idx < vlen; idx++)
10039 {
10040 ASM_OUTPUT_ADDR_VEC_ELT
10041 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10042 }
10043 }
10044
10045 /* Output current function's deferred case vectors. */
10046
10047 static void
10048 unicosmk_output_deferred_case_vectors (FILE *file)
10049 {
10050 struct machine_function *machine = cfun->machine;
10051 rtx t;
10052
10053 if (machine->addr_list == NULL_RTX)
10054 return;
10055
10056 switch_to_section (data_section);
10057 for (t = machine->addr_list; t; t = XEXP (t, 1))
10058 unicosmk_output_addr_vec (file, XEXP (t, 0));
10059 }
10060
10061 /* Generate the name of the SSIB section for the current function. */
10062
10063 #define SSIB_PREFIX "__SSIB_"
10064 #define SSIB_PREFIX_LEN 7
10065
10066 static const char *
10067 unicosmk_ssib_name (void)
10068 {
10069 /* This is ok since CAM won't be able to deal with names longer than that
10070 anyway. */
10071
10072 static char name[256];
10073
10074 rtx x;
10075 const char *fnname;
10076 int len;
10077
10078 x = DECL_RTL (cfun->decl);
10079 gcc_assert (GET_CODE (x) == MEM);
10080 x = XEXP (x, 0);
10081 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10082 fnname = XSTR (x, 0);
10083
10084 len = strlen (fnname);
10085 if (len + SSIB_PREFIX_LEN > 255)
10086 len = 255 - SSIB_PREFIX_LEN;
10087
10088 strcpy (name, SSIB_PREFIX);
10089 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10090 name[len + SSIB_PREFIX_LEN] = 0;
10091
10092 return name;
10093 }
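
/* Example (illustrative): for a function "main" this returns "__SSIB_main";
   longer names are truncated so that the prefixed result stays within the
   255-character limit assumed above.  */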
10094
10095 /* Set up the dynamic subprogram information block (DSIB) and update the
10096 frame pointer register ($15) for subroutines which have a frame. If the
10097 subroutine doesn't have a frame, simply increment $15. */
10098
10099 static void
10100 unicosmk_gen_dsib (unsigned long *imaskP)
10101 {
10102 if (alpha_procedure_type == PT_STACK)
10103 {
10104 const char *ssib_name;
10105 rtx mem;
10106
10107 /* Allocate 64 bytes for the DSIB. */
10108
10109 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10110 GEN_INT (-64))));
10111 emit_insn (gen_blockage ());
10112
10113 /* Save the return address. */
10114
10115 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10116 set_mem_alias_set (mem, alpha_sr_alias_set);
10117 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10118 (*imaskP) &= ~(1UL << REG_RA);
10119
10120 /* Save the old frame pointer. */
10121
10122 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10123 set_mem_alias_set (mem, alpha_sr_alias_set);
10124 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10125 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10126
10127 emit_insn (gen_blockage ());
10128
10129 /* Store the SSIB pointer. */
10130
10131 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10132 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10133 set_mem_alias_set (mem, alpha_sr_alias_set);
10134
10135 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10136 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10137 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10138
10139 /* Save the CIW index. */
10140
10141 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10142 set_mem_alias_set (mem, alpha_sr_alias_set);
10143 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10144
10145 emit_insn (gen_blockage ());
10146
10147 /* Set the new frame pointer. */
10148
10149 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10150 stack_pointer_rtx, GEN_INT (64))));
10151
10152 }
10153 else
10154 {
10155 /* Increment the frame pointer register to indicate that we do not
10156 have a frame. */
10157
10158 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10159 hard_frame_pointer_rtx, const1_rtx)));
10160 }
10161 }
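
/* Sketch of the resulting DSIB layout, derived from the stores above
   (offsets relative to the decremented stack pointer):

       $sp+56:  return address ($26)
       $sp+48:  previous frame pointer
       $sp+32:  pointer to the SSIB
       $sp+24:  CIW index from register $25

   and the new frame pointer $15 is set to $sp + 64, just above this
   64-byte block.  */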
10162
10163 /* Output the static subroutine information block for the current
10164 function. */
10165
10166 static void
10167 unicosmk_output_ssib (FILE *file, const char *fnname)
10168 {
10169 int len;
10170 int i;
10171 rtx x;
10172 rtx ciw;
10173 struct machine_function *machine = cfun->machine;
10174
10175 in_section = NULL;
10176 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10177 unicosmk_ssib_name ());
10178
10179 /* Some required stuff and the function name length. */
10180
10181 len = strlen (fnname);
10182 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10183
10184 /* Saved registers
10185 ??? We don't do that yet. */
10186
10187 fputs ("\t.quad\t0\n", file);
10188
10189 /* Function address. */
10190
10191 fputs ("\t.quad\t", file);
10192 assemble_name (file, fnname);
10193 putc ('\n', file);
10194
10195 fputs ("\t.quad\t0\n", file);
10196 fputs ("\t.quad\t0\n", file);
10197
10198 /* Function name.
10199 ??? We do it the same way Cray CC does it but this could be
10200 simplified. */
10201
10202   for (i = 0; i < len; i++)
10203 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10204   if ((len % 8) == 0)
10205 fputs ("\t.quad\t0\n", file);
10206 else
10207 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10208
10209 /* All call information words used in the function. */
10210
10211 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10212 {
10213 ciw = XEXP (x, 0);
10214 #if HOST_BITS_PER_WIDE_INT == 32
10215 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10216 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10217 #else
10218 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10219 #endif
10220 }
10221 }
10222
10223 /* Add a call information word (CIW) to the list of the current function's
10224 CIWs and return its index.
10225
10226 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10227
10228 rtx
10229 unicosmk_add_call_info_word (rtx x)
10230 {
10231 rtx node;
10232 struct machine_function *machine = cfun->machine;
10233
10234 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10235 if (machine->first_ciw == NULL_RTX)
10236 machine->first_ciw = node;
10237 else
10238 XEXP (machine->last_ciw, 1) = node;
10239
10240 machine->last_ciw = node;
10241 ++machine->ciw_count;
10242
10243 return GEN_INT (machine->ciw_count
10244 + strlen (current_function_name ())/8 + 5);
10245 }
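
/* Worked example (illustrative): for the first CIW added in a function
   named "main", the returned index is 1 + strlen ("main") / 8 + 5 = 6;
   the constant 5 appears to correspond to the five fixed quadwords that
   unicosmk_output_ssib emits before the padded function name and the CIW
   list.  */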
10246
10247 /* The Cray assembler doesn't accept extern declarations for symbols which
10248 are defined in the same file. We have to keep track of all global
10249 symbols which are referenced and/or defined in a source file and output
10250 extern declarations for those which are referenced but not defined at
10251    the end of the file.  */
10252
10253 /* List of identifiers for which an extern declaration might have to be
10254 emitted. */
10255 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10256
10257 struct unicosmk_extern_list
10258 {
10259 struct unicosmk_extern_list *next;
10260 const char *name;
10261 };
10262
10263 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10264
10265 /* Output extern declarations which are required for every asm file. */
10266
10267 static void
10268 unicosmk_output_default_externs (FILE *file)
10269 {
10270 static const char *const externs[] =
10271 { "__T3E_MISMATCH" };
10272
10273 int i;
10274 int n;
10275
10276 n = ARRAY_SIZE (externs);
10277
10278 for (i = 0; i < n; i++)
10279 fprintf (file, "\t.extern\t%s\n", externs[i]);
10280 }
10281
10282 /* Output extern declarations for global symbols which have been
10283 referenced but not defined. */
10284
10285 static void
10286 unicosmk_output_externs (FILE *file)
10287 {
10288 struct unicosmk_extern_list *p;
10289 const char *real_name;
10290 int len;
10291 tree name_tree;
10292
10293 len = strlen (user_label_prefix);
10294 for (p = unicosmk_extern_head; p != 0; p = p->next)
10295 {
10296 /* We have to strip the encoding and possibly remove user_label_prefix
10297 from the identifier in order to handle -fleading-underscore and
10298 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10299 real_name = default_strip_name_encoding (p->name);
10300 if (len && p->name[0] == '*'
10301 && !memcmp (real_name, user_label_prefix, len))
10302 real_name += len;
10303
10304 name_tree = get_identifier (real_name);
10305 if (! TREE_ASM_WRITTEN (name_tree))
10306 {
10307 TREE_ASM_WRITTEN (name_tree) = 1;
10308 fputs ("\t.extern\t", file);
10309 assemble_name (file, p->name);
10310 putc ('\n', file);
10311 }
10312 }
10313 }
10314
10315 /* Record an extern. */
10316
10317 void
10318 unicosmk_add_extern (const char *name)
10319 {
10320 struct unicosmk_extern_list *p;
10321
10322 p = (struct unicosmk_extern_list *)
10323 xmalloc (sizeof (struct unicosmk_extern_list));
10324 p->next = unicosmk_extern_head;
10325 p->name = name;
10326 unicosmk_extern_head = p;
10327 }
10328
10329 /* The Cray assembler generates incorrect code if identifiers which
10330 conflict with register names are used as instruction operands. We have
10331 to replace such identifiers with DEX expressions. */
10332
10333 /* Structure to collect identifiers which have been replaced by DEX
10334 expressions. */
10335 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10336
10337 struct unicosmk_dex {
10338 struct unicosmk_dex *next;
10339 const char *name;
10340 };
10341
10342 /* List of identifiers which have been replaced by DEX expressions. The DEX
10343 number is determined by the position in the list. */
10344
10345 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10346
10347 /* The number of elements in the DEX list. */
10348
10349 static int unicosmk_dex_count = 0;
10350
10351 /* Check if NAME must be replaced by a DEX expression. */
10352
10353 static int
10354 unicosmk_special_name (const char *name)
10355 {
10356 if (name[0] == '*')
10357 ++name;
10358
10359 if (name[0] == '$')
10360 ++name;
10361
10362 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10363 return 0;
10364
10365 switch (name[1])
10366 {
10367 case '1': case '2':
10368 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10369
10370 case '3':
10371 return (name[2] == '\0'
10372 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10373
10374 default:
10375 return (ISDIGIT (name[1]) && name[2] == '\0');
10376 }
10377 }
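
/* Examples (illustrative): "r0", "f9", "*R17" and "$f31" are all treated as
   special, since after stripping any '*' or '$' prefix they name integer or
   floating registers 0..31, whereas "r32", "f100" and "foo" are not.  */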
10378
10379 /* Return the DEX number if X must be replaced by a DEX expression and 0
10380 otherwise. */
10381
10382 static int
10383 unicosmk_need_dex (rtx x)
10384 {
10385 struct unicosmk_dex *dex;
10386 const char *name;
10387 int i;
10388
10389 if (GET_CODE (x) != SYMBOL_REF)
10390 return 0;
10391
10392 name = XSTR (x,0);
10393 if (! unicosmk_special_name (name))
10394 return 0;
10395
10396 i = unicosmk_dex_count;
10397 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10398 {
10399 if (! strcmp (name, dex->name))
10400 return i;
10401 --i;
10402 }
10403
10404 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10405 dex->name = name;
10406 dex->next = unicosmk_dex_list;
10407 unicosmk_dex_list = dex;
10408
10409 ++unicosmk_dex_count;
10410 return unicosmk_dex_count;
10411 }
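
/* Illustrative numbering (hypothetical symbols): the first conflicting
   symbol seen, say a function named "r5", is assigned DEX (1), the next new
   one DEX (2), and so on; repeated references return the number already
   assigned, and unicosmk_output_dex later emits the matching
   "DEX (n) = name" definitions.  */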
10412
10413 /* Output the DEX definitions for this file. */
10414
10415 static void
10416 unicosmk_output_dex (FILE *file)
10417 {
10418 struct unicosmk_dex *dex;
10419 int i;
10420
10421 if (unicosmk_dex_list == NULL)
10422 return;
10423
10424 fprintf (file, "\t.dexstart\n");
10425
10426 i = unicosmk_dex_count;
10427 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10428 {
10429 fprintf (file, "\tDEX (%d) = ", i);
10430 assemble_name (file, dex->name);
10431 putc ('\n', file);
10432 --i;
10433 }
10434
10435 fprintf (file, "\t.dexend\n");
10436 }
10437
10438 /* Output text to appear at the beginning of an assembler file.  */
10439
10440 static void
10441 unicosmk_file_start (void)
10442 {
10443 int i;
10444
10445 fputs ("\t.ident\t", asm_out_file);
10446 unicosmk_output_module_name (asm_out_file);
10447 fputs ("\n\n", asm_out_file);
10448
10449 /* The Unicos/Mk assembler uses different register names. Instead of trying
10450 to support them, we simply use micro definitions. */
10451
10452 /* CAM has different register names: rN for the integer register N and fN
10453 for the floating-point register N. Instead of trying to use these in
10454 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10455 register. */
10456
10457 for (i = 0; i < 32; ++i)
10458 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10459
10460 for (i = 0; i < 32; ++i)
10461 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10462
10463 putc ('\n', asm_out_file);
10464
10465   /* The .align directive fills unused space with zeroes, which does not work
10466 in code sections. We define the macro 'gcc@code@align' which uses nops
10467 instead. Note that it assumes that code sections always have the
10468 biggest possible alignment since . refers to the current offset from
10469 the beginning of the section. */
10470
10471 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10472 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10473 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10474 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10475 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10476 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10477 fputs ("\t.endr\n", asm_out_file);
10478 fputs ("\t.endif\n", asm_out_file);
10479 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10480
10481 /* Output extern declarations which should always be visible. */
10482 unicosmk_output_default_externs (asm_out_file);
10483
10484 /* Open a dummy section. We always need to be inside a section for the
10485 section-switching code to work correctly.
10486 ??? This should be a module id or something like that. I still have to
10487 figure out what the rules for those are. */
10488 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10489 }
10490
10491 /* Output text to appear at the end of an assembler file. This includes all
10492 pending extern declarations and DEX expressions. */
10493
10494 static void
10495 unicosmk_file_end (void)
10496 {
10497 fputs ("\t.endp\n\n", asm_out_file);
10498
10499 /* Output all pending externs. */
10500
10501 unicosmk_output_externs (asm_out_file);
10502
10503 /* Output dex definitions used for functions whose names conflict with
10504 register names. */
10505
10506 unicosmk_output_dex (asm_out_file);
10507
10508 fputs ("\t.end\t", asm_out_file);
10509 unicosmk_output_module_name (asm_out_file);
10510 putc ('\n', asm_out_file);
10511 }
10512
10513 #else
10514
10515 static void
10516 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10517 {}
10518
10519 static void
10520 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10521 {}
10522
10523 static void
10524 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10525 const char * fnname ATTRIBUTE_UNUSED)
10526 {}
10527
10528 rtx
10529 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10530 {
10531 return NULL_RTX;
10532 }
10533
10534 static int
10535 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10536 {
10537 return 0;
10538 }
10539
10540 #endif /* TARGET_ABI_UNICOSMK */
10541
10542 static void
10543 alpha_init_libfuncs (void)
10544 {
10545 if (TARGET_ABI_UNICOSMK)
10546 {
10547 /* Prevent gcc from generating calls to __divsi3. */
10548 set_optab_libfunc (sdiv_optab, SImode, 0);
10549 set_optab_libfunc (udiv_optab, SImode, 0);
10550
10551 /* Use the functions provided by the system library
10552 for DImode integer division. */
10553 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10554 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10555 }
10556 else if (TARGET_ABI_OPEN_VMS)
10557 {
10558 /* Use the VMS runtime library functions for division and
10559 remainder. */
10560 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10561 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10562 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10563 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10564 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10565 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10566 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10567 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10568 }
10569 }
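
/* For example (illustrative): when targeting OpenVMS, a 64-bit signed
   division that cannot be open-coded is emitted as a call to OTS$DIV_L;
   on Unicos/Mk the SImode division optabs are cleared so __divsi3 is never
   called, and DImode division goes through the system's $sldiv / $uldiv
   entry points instead.  */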
10570
10571 \f
10572 /* Initialize the GCC target structure. */
10573 #if TARGET_ABI_OPEN_VMS
10574 # undef TARGET_ATTRIBUTE_TABLE
10575 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10576 # undef TARGET_SECTION_TYPE_FLAGS
10577 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10578 #endif
10579
10580 #undef TARGET_IN_SMALL_DATA_P
10581 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10582
10583 #if TARGET_ABI_UNICOSMK
10584 # undef TARGET_INSERT_ATTRIBUTES
10585 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10586 # undef TARGET_SECTION_TYPE_FLAGS
10587 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10588 # undef TARGET_ASM_UNIQUE_SECTION
10589 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10590 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10591 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10592 # undef TARGET_ASM_GLOBALIZE_LABEL
10593 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10594 # undef TARGET_MUST_PASS_IN_STACK
10595 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10596 #endif
10597
10598 #undef TARGET_ASM_ALIGNED_HI_OP
10599 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10600 #undef TARGET_ASM_ALIGNED_DI_OP
10601 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10602
10603 /* Default unaligned ops are provided for ELF systems. To get unaligned
10604 data for non-ELF systems, we have to turn off auto alignment. */
10605 #ifndef OBJECT_FORMAT_ELF
10606 #undef TARGET_ASM_UNALIGNED_HI_OP
10607 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10608 #undef TARGET_ASM_UNALIGNED_SI_OP
10609 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10610 #undef TARGET_ASM_UNALIGNED_DI_OP
10611 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10612 #endif
10613
10614 #ifdef OBJECT_FORMAT_ELF
10615 #undef TARGET_ASM_RELOC_RW_MASK
10616 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10617 #undef TARGET_ASM_SELECT_RTX_SECTION
10618 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10619 #undef TARGET_SECTION_TYPE_FLAGS
10620 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10621 #endif
10622
10623 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10624 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10625
10626 #undef TARGET_INIT_LIBFUNCS
10627 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10628
10629 #if TARGET_ABI_UNICOSMK
10630 #undef TARGET_ASM_FILE_START
10631 #define TARGET_ASM_FILE_START unicosmk_file_start
10632 #undef TARGET_ASM_FILE_END
10633 #define TARGET_ASM_FILE_END unicosmk_file_end
10634 #else
10635 #undef TARGET_ASM_FILE_START
10636 #define TARGET_ASM_FILE_START alpha_file_start
10637 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10638 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10639 #endif
10640
10641 #undef TARGET_SCHED_ADJUST_COST
10642 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10643 #undef TARGET_SCHED_ISSUE_RATE
10644 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10645 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10646 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10647 alpha_multipass_dfa_lookahead
10648
10649 #undef TARGET_HAVE_TLS
10650 #define TARGET_HAVE_TLS HAVE_AS_TLS
10651
10652 #undef TARGET_INIT_BUILTINS
10653 #define TARGET_INIT_BUILTINS alpha_init_builtins
10654 #undef TARGET_EXPAND_BUILTIN
10655 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10656 #undef TARGET_FOLD_BUILTIN
10657 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10658
10659 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10660 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10661 #undef TARGET_CANNOT_COPY_INSN_P
10662 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10663 #undef TARGET_CANNOT_FORCE_CONST_MEM
10664 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10665
10666 #if TARGET_ABI_OSF
10667 #undef TARGET_ASM_OUTPUT_MI_THUNK
10668 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10669 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10670 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
10671 #undef TARGET_STDARG_OPTIMIZE_HOOK
10672 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10673 #endif
10674
10675 #undef TARGET_RTX_COSTS
10676 #define TARGET_RTX_COSTS alpha_rtx_costs
10677 #undef TARGET_ADDRESS_COST
10678 #define TARGET_ADDRESS_COST hook_int_rtx_0
10679
10680 #undef TARGET_MACHINE_DEPENDENT_REORG
10681 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10682
10683 #undef TARGET_PROMOTE_FUNCTION_ARGS
10684 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
10685 #undef TARGET_PROMOTE_FUNCTION_RETURN
10686 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
10687 #undef TARGET_PROMOTE_PROTOTYPES
10688 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_false
10689 #undef TARGET_RETURN_IN_MEMORY
10690 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10691 #undef TARGET_PASS_BY_REFERENCE
10692 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10693 #undef TARGET_SETUP_INCOMING_VARARGS
10694 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10695 #undef TARGET_STRICT_ARGUMENT_NAMING
10696 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10697 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10698 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10699 #undef TARGET_SPLIT_COMPLEX_ARG
10700 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10701 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10702 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10703 #undef TARGET_ARG_PARTIAL_BYTES
10704 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10705
10706 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10707 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10708 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10709 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10710
10711 #undef TARGET_BUILD_BUILTIN_VA_LIST
10712 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10713
10714 /* The Alpha architecture does not require sequential consistency. See
10715 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10716 for an example of how it can be violated in practice. */
10717 #undef TARGET_RELAXED_ORDERING
10718 #define TARGET_RELAXED_ORDERING true
10719
10720 #undef TARGET_DEFAULT_TARGET_FLAGS
10721 #define TARGET_DEFAULT_TARGET_FLAGS \
10722 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10723 #undef TARGET_HANDLE_OPTION
10724 #define TARGET_HANDLE_OPTION alpha_handle_option
10725
10726 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10727 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
10728 #define TARGET_MANGLE_FUNDAMENTAL_TYPE alpha_mangle_fundamental_type
10729 #endif
10730
10731 struct gcc_target targetm = TARGET_INITIALIZER;
10732
10733 \f
10734 #include "gt-alpha.h"