alpha.c (alpha_end_function): Do not clear crtl->emit structure and free insn locator...
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60 #include "libfuncs.h"
61
62 /* Specify which cpu to schedule for. */
63 enum processor_type alpha_tune;
64
65 /* Which cpu we're generating code for. */
66 enum processor_type alpha_cpu;
67
68 static const char * const alpha_cpu_name[] =
69 {
70 "ev4", "ev5", "ev6"
71 };
72
73 /* Specify how accurate floating-point traps need to be. */
74
75 enum alpha_trap_precision alpha_tp;
76
77 /* Specify the floating-point rounding mode. */
78
79 enum alpha_fp_rounding_mode alpha_fprm;
80
81 /* Specify which things cause traps. */
82
83 enum alpha_fp_trap_mode alpha_fptm;
84
85 /* Nonzero if inside of a function, because the Alpha asm can't
86 handle .files inside of functions. */
87
88 static int inside_function = FALSE;
89
90 /* The number of cycles of latency we should assume on memory reads. */
91
92 int alpha_memory_latency = 3;
93
94 /* Whether the function needs the GP. */
95
96 static int alpha_function_needs_gp;
97
98 /* The alias set for prologue/epilogue register save/restore. */
99
100 static GTY(()) alias_set_type alpha_sr_alias_set;
101
102 /* The assembler name of the current function. */
103
104 static const char *alpha_fnname;
105
106 /* The next explicit relocation sequence number. */
107 extern GTY(()) int alpha_next_sequence_number;
108 int alpha_next_sequence_number = 1;
109
110 /* The literal and gpdisp sequence numbers for this insn, as printed
111 by %# and %* respectively. */
112 extern GTY(()) int alpha_this_literal_sequence_number;
113 extern GTY(()) int alpha_this_gpdisp_sequence_number;
114 int alpha_this_literal_sequence_number;
115 int alpha_this_gpdisp_sequence_number;
116
117 /* Costs of various operations on the different architectures. */
118
119 struct alpha_rtx_cost_data
120 {
121 unsigned char fp_add;
122 unsigned char fp_mult;
123 unsigned char fp_div_sf;
124 unsigned char fp_div_df;
125 unsigned char int_mult_si;
126 unsigned char int_mult_di;
127 unsigned char int_shift;
128 unsigned char int_cmov;
129 unsigned short int_div;
130 };
131
132 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
133 {
134 { /* EV4 */
135 COSTS_N_INSNS (6), /* fp_add */
136 COSTS_N_INSNS (6), /* fp_mult */
137 COSTS_N_INSNS (34), /* fp_div_sf */
138 COSTS_N_INSNS (63), /* fp_div_df */
139 COSTS_N_INSNS (23), /* int_mult_si */
140 COSTS_N_INSNS (23), /* int_mult_di */
141 COSTS_N_INSNS (2), /* int_shift */
142 COSTS_N_INSNS (2), /* int_cmov */
143 COSTS_N_INSNS (97), /* int_div */
144 },
145 { /* EV5 */
146 COSTS_N_INSNS (4), /* fp_add */
147 COSTS_N_INSNS (4), /* fp_mult */
148 COSTS_N_INSNS (15), /* fp_div_sf */
149 COSTS_N_INSNS (22), /* fp_div_df */
150 COSTS_N_INSNS (8), /* int_mult_si */
151 COSTS_N_INSNS (12), /* int_mult_di */
152 COSTS_N_INSNS (1) + 1, /* int_shift */
153 COSTS_N_INSNS (1), /* int_cmov */
154 COSTS_N_INSNS (83), /* int_div */
155 },
156 { /* EV6 */
157 COSTS_N_INSNS (4), /* fp_add */
158 COSTS_N_INSNS (4), /* fp_mult */
159 COSTS_N_INSNS (12), /* fp_div_sf */
160 COSTS_N_INSNS (15), /* fp_div_df */
161 COSTS_N_INSNS (7), /* int_mult_si */
162 COSTS_N_INSNS (7), /* int_mult_di */
163 COSTS_N_INSNS (1), /* int_shift */
164 COSTS_N_INSNS (2), /* int_cmov */
165 COSTS_N_INSNS (86), /* int_div */
166 },
167 };
168
169 /* Similar but tuned for code size instead of execution latency. The
170 extra +N is fractional cost tuning based on latency. It's used to
171 encourage use of cheaper insns like shift, but only if there's just
172 one of them. */
173
174 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
175 {
176 COSTS_N_INSNS (1), /* fp_add */
177 COSTS_N_INSNS (1), /* fp_mult */
178 COSTS_N_INSNS (1), /* fp_div_sf */
179 COSTS_N_INSNS (1) + 1, /* fp_div_df */
180 COSTS_N_INSNS (1) + 1, /* int_mult_si */
181 COSTS_N_INSNS (1) + 2, /* int_mult_di */
182 COSTS_N_INSNS (1), /* int_shift */
183 COSTS_N_INSNS (1), /* int_cmov */
184 COSTS_N_INSNS (6), /* int_div */
185 };
186
187 /* Get the number of args of a function in one of two ways. */
188 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
189 #define NUM_ARGS crtl->args.info.num_args
190 #else
191 #define NUM_ARGS crtl->args.info
192 #endif
193
194 #define REG_PV 27
195 #define REG_RA 26
196
197 /* Declarations of static functions. */
198 static struct machine_function *alpha_init_machine_status (void);
199 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
200
201 #if TARGET_ABI_OPEN_VMS
202 static void alpha_write_linkage (FILE *, const char *, tree);
203 static bool vms_valid_pointer_mode (enum machine_mode);
204 #endif
205
206 static void unicosmk_output_deferred_case_vectors (FILE *);
207 static void unicosmk_gen_dsib (unsigned long *);
208 static void unicosmk_output_ssib (FILE *, const char *);
209 static int unicosmk_need_dex (rtx);
210 \f
211 /* Implement TARGET_HANDLE_OPTION. */
212
213 static bool
214 alpha_handle_option (size_t code, const char *arg, int value)
215 {
216 switch (code)
217 {
218 case OPT_mfp_regs:
219 if (value == 0)
220 target_flags |= MASK_SOFT_FP;
221 break;
222
223 case OPT_mieee:
224 case OPT_mieee_with_inexact:
225 target_flags |= MASK_IEEE_CONFORMANT;
226 break;
227
228 case OPT_mtls_size_:
229 if (value != 16 && value != 32 && value != 64)
230 error ("bad value %qs for -mtls-size switch", arg);
231 break;
232 }
233
234 return true;
235 }
236
237 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
238 /* Implement TARGET_MANGLE_TYPE. */
239
240 static const char *
241 alpha_mangle_type (const_tree type)
242 {
243 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
244 && TARGET_LONG_DOUBLE_128)
245 return "g";
246
247 /* For all other types, use normal C++ mangling. */
248 return NULL;
249 }
250 #endif
251
252 /* Parse target option strings. */
253
254 void
255 override_options (void)
256 {
257 static const struct cpu_table {
258 const char *const name;
259 const enum processor_type processor;
260 const int flags;
261 } cpu_table[] = {
262 { "ev4", PROCESSOR_EV4, 0 },
263 { "ev45", PROCESSOR_EV4, 0 },
264 { "21064", PROCESSOR_EV4, 0 },
265 { "ev5", PROCESSOR_EV5, 0 },
266 { "21164", PROCESSOR_EV5, 0 },
267 { "ev56", PROCESSOR_EV5, MASK_BWX },
268 { "21164a", PROCESSOR_EV5, MASK_BWX },
269 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
270 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
271 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
273 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
274 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
275 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
276 };
277
278 int const ct_size = ARRAY_SIZE (cpu_table);
279 int i;
280
281 /* Unicos/Mk doesn't have shared libraries. */
282 if (TARGET_ABI_UNICOSMK && flag_pic)
283 {
284 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
285 (flag_pic > 1) ? "PIC" : "pic");
286 flag_pic = 0;
287 }
288
289 /* On Unicos/Mk, the native compiler consistently generates /d suffices for
290 floating-point instructions. Make that the default for this target. */
291 if (TARGET_ABI_UNICOSMK)
292 alpha_fprm = ALPHA_FPRM_DYN;
293 else
294 alpha_fprm = ALPHA_FPRM_NORM;
295
296 alpha_tp = ALPHA_TP_PROG;
297 alpha_fptm = ALPHA_FPTM_N;
298
299 /* We cannot use su and sui qualifiers for conversion instructions on
300 Unicos/Mk. I'm not sure if this is due to assembler or hardware
301 limitations. Right now, we issue a warning if -mieee is specified
302 and then ignore it; eventually, we should either get it right or
303 disable the option altogether. */
304
305 if (TARGET_IEEE)
306 {
307 if (TARGET_ABI_UNICOSMK)
308 warning (0, "-mieee not supported on Unicos/Mk");
309 else
310 {
311 alpha_tp = ALPHA_TP_INSN;
312 alpha_fptm = ALPHA_FPTM_SU;
313 }
314 }
315
316 if (TARGET_IEEE_WITH_INEXACT)
317 {
318 if (TARGET_ABI_UNICOSMK)
319 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
320 else
321 {
322 alpha_tp = ALPHA_TP_INSN;
323 alpha_fptm = ALPHA_FPTM_SUI;
324 }
325 }
326
327 if (alpha_tp_string)
328 {
329 if (! strcmp (alpha_tp_string, "p"))
330 alpha_tp = ALPHA_TP_PROG;
331 else if (! strcmp (alpha_tp_string, "f"))
332 alpha_tp = ALPHA_TP_FUNC;
333 else if (! strcmp (alpha_tp_string, "i"))
334 alpha_tp = ALPHA_TP_INSN;
335 else
336 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
337 }
338
339 if (alpha_fprm_string)
340 {
341 if (! strcmp (alpha_fprm_string, "n"))
342 alpha_fprm = ALPHA_FPRM_NORM;
343 else if (! strcmp (alpha_fprm_string, "m"))
344 alpha_fprm = ALPHA_FPRM_MINF;
345 else if (! strcmp (alpha_fprm_string, "c"))
346 alpha_fprm = ALPHA_FPRM_CHOP;
347 else if (! strcmp (alpha_fprm_string,"d"))
348 alpha_fprm = ALPHA_FPRM_DYN;
349 else
350 error ("bad value %qs for -mfp-rounding-mode switch",
351 alpha_fprm_string);
352 }
353
354 if (alpha_fptm_string)
355 {
356 if (strcmp (alpha_fptm_string, "n") == 0)
357 alpha_fptm = ALPHA_FPTM_N;
358 else if (strcmp (alpha_fptm_string, "u") == 0)
359 alpha_fptm = ALPHA_FPTM_U;
360 else if (strcmp (alpha_fptm_string, "su") == 0)
361 alpha_fptm = ALPHA_FPTM_SU;
362 else if (strcmp (alpha_fptm_string, "sui") == 0)
363 alpha_fptm = ALPHA_FPTM_SUI;
364 else
365 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
366 }
367
368 if (alpha_cpu_string)
369 {
370 for (i = 0; i < ct_size; i++)
371 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
372 {
373 alpha_tune = alpha_cpu = cpu_table [i].processor;
374 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
375 target_flags |= cpu_table [i].flags;
376 break;
377 }
378 if (i == ct_size)
379 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
380 }
381
382 if (alpha_tune_string)
383 {
384 for (i = 0; i < ct_size; i++)
385 if (! strcmp (alpha_tune_string, cpu_table [i].name))
386 {
387 alpha_tune = cpu_table [i].processor;
388 break;
389 }
390 if (i == ct_size)
391 error ("bad value %qs for -mcpu switch", alpha_tune_string);
392 }
393
394 /* Do some sanity checks on the above options. */
395
396 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
397 {
398 warning (0, "trap mode not supported on Unicos/Mk");
399 alpha_fptm = ALPHA_FPTM_N;
400 }
401
402 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
403 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
404 {
405 warning (0, "fp software completion requires -mtrap-precision=i");
406 alpha_tp = ALPHA_TP_INSN;
407 }
408
409 if (alpha_cpu == PROCESSOR_EV6)
410 {
411 /* Except for EV6 pass 1 (not released), we always have precise
412 arithmetic traps. Which means we can do software completion
413 without minding trap shadows. */
414 alpha_tp = ALPHA_TP_PROG;
415 }
416
417 if (TARGET_FLOAT_VAX)
418 {
419 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
420 {
421 warning (0, "rounding mode not supported for VAX floats");
422 alpha_fprm = ALPHA_FPRM_NORM;
423 }
424 if (alpha_fptm == ALPHA_FPTM_SUI)
425 {
426 warning (0, "trap mode not supported for VAX floats");
427 alpha_fptm = ALPHA_FPTM_SU;
428 }
429 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
430 warning (0, "128-bit long double not supported for VAX floats");
431 target_flags &= ~MASK_LONG_DOUBLE_128;
432 }
433
434 {
435 char *end;
436 int lat;
437
438 if (!alpha_mlat_string)
439 alpha_mlat_string = "L1";
440
441 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
442 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
443 ;
444 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
445 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
446 && alpha_mlat_string[2] == '\0')
447 {
448 static int const cache_latency[][4] =
449 {
450 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
451 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
452 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
453 };
454
455 lat = alpha_mlat_string[1] - '0';
456 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
457 {
458 warning (0, "L%d cache latency unknown for %s",
459 lat, alpha_cpu_name[alpha_tune]);
460 lat = 3;
461 }
462 else
463 lat = cache_latency[alpha_tune][lat-1];
464 }
465 else if (! strcmp (alpha_mlat_string, "main"))
466 {
467 /* Most current memories have about 370ns latency. This is
468 a reasonable guess for a fast cpu. */
469 lat = 150;
470 }
471 else
472 {
473 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
474 lat = 3;
475 }
476
477 alpha_memory_latency = lat;
478 }
479
480 /* Default the definition of "small data" to 8 bytes. */
481 if (!g_switch_set)
482 g_switch_value = 8;
483
484 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
485 if (flag_pic == 1)
486 target_flags |= MASK_SMALL_DATA;
487 else if (flag_pic == 2)
488 target_flags &= ~MASK_SMALL_DATA;
489
490 /* Align labels and loops for optimal branching. */
491 /* ??? Kludge these by not doing anything if we don't optimize and also if
492 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
493 if (optimize > 0 && write_symbols != SDB_DEBUG)
494 {
495 if (align_loops <= 0)
496 align_loops = 16;
497 if (align_jumps <= 0)
498 align_jumps = 16;
499 }
500 if (align_functions <= 0)
501 align_functions = 16;
502
503 /* Acquire a unique set number for our register saves and restores. */
504 alpha_sr_alias_set = new_alias_set ();
505
506 /* Register variables and functions with the garbage collector. */
507
508 /* Set up function hooks. */
509 init_machine_status = alpha_init_machine_status;
510
511 /* Tell the compiler when we're using VAX floating point. */
512 if (TARGET_FLOAT_VAX)
513 {
514 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
515 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
516 REAL_MODE_FORMAT (TFmode) = NULL;
517 }
518
519 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
520 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
521 target_flags |= MASK_LONG_DOUBLE_128;
522 #endif
523
524 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
525 can be optimized to ap = __builtin_next_arg (0). */
526 if (TARGET_ABI_UNICOSMK)
527 targetm.expand_builtin_va_start = NULL;
528 }
529 \f
530 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
531
532 int
533 zap_mask (HOST_WIDE_INT value)
534 {
535 int i;
536
537 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
538 i++, value >>= 8)
539 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
540 return 0;
541
542 return 1;
543 }
544
545 /* Return true if OP is valid for a particular TLS relocation.
546 We are already guaranteed that OP is a CONST. */
547
548 int
549 tls_symbolic_operand_1 (rtx op, int size, int unspec)
550 {
551 op = XEXP (op, 0);
552
553 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
554 return 0;
555 op = XVECEXP (op, 0, 0);
556
557 if (GET_CODE (op) != SYMBOL_REF)
558 return 0;
559
560 switch (SYMBOL_REF_TLS_MODEL (op))
561 {
562 case TLS_MODEL_LOCAL_DYNAMIC:
563 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
564 case TLS_MODEL_INITIAL_EXEC:
565 return unspec == UNSPEC_TPREL && size == 64;
566 case TLS_MODEL_LOCAL_EXEC:
567 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
568 default:
569 gcc_unreachable ();
570 }
571 }
572
573 /* Used by aligned_memory_operand and unaligned_memory_operand to
574 resolve what reload is going to do with OP if it's a register. */
575
576 rtx
577 resolve_reload_operand (rtx op)
578 {
579 if (reload_in_progress)
580 {
581 rtx tmp = op;
582 if (GET_CODE (tmp) == SUBREG)
583 tmp = SUBREG_REG (tmp);
584 if (REG_P (tmp)
585 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
586 {
587 op = reg_equiv_memory_loc[REGNO (tmp)];
588 if (op == 0)
589 return 0;
590 }
591 }
592 return op;
593 }
594
595 /* The scalar modes supported differs from the default check-what-c-supports
596 version in that sometimes TFmode is available even when long double
597 indicates only DFmode. On unicosmk, we have the situation that HImode
598 doesn't map to any C type, but of course we still support that. */
599
600 static bool
601 alpha_scalar_mode_supported_p (enum machine_mode mode)
602 {
603 switch (mode)
604 {
605 case QImode:
606 case HImode:
607 case SImode:
608 case DImode:
609 case TImode: /* via optabs.c */
610 return true;
611
612 case SFmode:
613 case DFmode:
614 return true;
615
616 case TFmode:
617 return TARGET_HAS_XFLOATING_LIBS;
618
619 default:
620 return false;
621 }
622 }
623
624 /* Alpha implements a couple of integer vector mode operations when
625 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
626 which allows the vectorizer to operate on e.g. move instructions,
627 or when expand_vector_operations can do something useful. */
628
629 static bool
630 alpha_vector_mode_supported_p (enum machine_mode mode)
631 {
632 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
633 }
634
635 /* Return 1 if this function can directly return via $26. */
636
637 int
638 direct_return (void)
639 {
640 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
641 && reload_completed
642 && alpha_sa_size () == 0
643 && get_frame_size () == 0
644 && crtl->outgoing_args_size == 0
645 && crtl->args.pretend_args_size == 0);
646 }
647
648 /* Return the ADDR_VEC associated with a tablejump insn. */
649
650 rtx
651 alpha_tablejump_addr_vec (rtx insn)
652 {
653 rtx tmp;
654
655 tmp = JUMP_LABEL (insn);
656 if (!tmp)
657 return NULL_RTX;
658 tmp = NEXT_INSN (tmp);
659 if (!tmp)
660 return NULL_RTX;
661 if (JUMP_P (tmp)
662 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
663 return PATTERN (tmp);
664 return NULL_RTX;
665 }
666
667 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
668
669 rtx
670 alpha_tablejump_best_label (rtx insn)
671 {
672 rtx jump_table = alpha_tablejump_addr_vec (insn);
673 rtx best_label = NULL_RTX;
674
675 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
676 there for edge frequency counts from profile data. */
677
678 if (jump_table)
679 {
680 int n_labels = XVECLEN (jump_table, 1);
681 int best_count = -1;
682 int i, j;
683
684 for (i = 0; i < n_labels; i++)
685 {
686 int count = 1;
687
688 for (j = i + 1; j < n_labels; j++)
689 if (XEXP (XVECEXP (jump_table, 1, i), 0)
690 == XEXP (XVECEXP (jump_table, 1, j), 0))
691 count++;
692
693 if (count > best_count)
694 best_count = count, best_label = XVECEXP (jump_table, 1, i);
695 }
696 }
697
698 return best_label ? best_label : const0_rtx;
699 }
700
701 /* Return the TLS model to use for SYMBOL. */
702
703 static enum tls_model
704 tls_symbolic_operand_type (rtx symbol)
705 {
706 enum tls_model model;
707
708 if (GET_CODE (symbol) != SYMBOL_REF)
709 return TLS_MODEL_NONE;
710 model = SYMBOL_REF_TLS_MODEL (symbol);
711
712 /* Local-exec with a 64-bit size is the same code as initial-exec. */
713 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
714 model = TLS_MODEL_INITIAL_EXEC;
715
716 return model;
717 }
718 \f
719 /* Return true if the function DECL will share the same GP as any
720 function in the current unit of translation. */
721
722 static bool
723 decl_has_samegp (const_tree decl)
724 {
725 /* Functions that are not local can be overridden, and thus may
726 not share the same gp. */
727 if (!(*targetm.binds_local_p) (decl))
728 return false;
729
730 /* If -msmall-data is in effect, assume that there is only one GP
731 for the module, and so any local symbol has this property. We
732 need explicit relocations to be able to enforce this for symbols
733 not defined in this unit of translation, however. */
734 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
735 return true;
736
737 /* Functions that are not external are defined in this UoT. */
738 /* ??? Irritatingly, static functions not yet emitted are still
739 marked "external". Apply this to non-static functions only. */
740 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
741 }
742
743 /* Return true if EXP should be placed in the small data section. */
744
745 static bool
746 alpha_in_small_data_p (const_tree exp)
747 {
748 /* We want to merge strings, so we never consider them small data. */
749 if (TREE_CODE (exp) == STRING_CST)
750 return false;
751
752 /* Functions are never in the small data area. Duh. */
753 if (TREE_CODE (exp) == FUNCTION_DECL)
754 return false;
755
756 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
757 {
758 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
759 if (strcmp (section, ".sdata") == 0
760 || strcmp (section, ".sbss") == 0)
761 return true;
762 }
763 else
764 {
765 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
766
767 /* If this is an incomplete type with size 0, then we can't put it
768 in sdata because it might be too big when completed. */
769 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
770 return true;
771 }
772
773 return false;
774 }
775
776 #if TARGET_ABI_OPEN_VMS
777 static bool
778 vms_valid_pointer_mode (enum machine_mode mode)
779 {
780 return (mode == SImode || mode == DImode);
781 }
782
783 static bool
784 alpha_linkage_symbol_p (const char *symname)
785 {
786 int symlen = strlen (symname);
787
788 if (symlen > 4)
789 return strcmp (&symname [symlen - 4], "..lk") == 0;
790
791 return false;
792 }
793
794 #define LINKAGE_SYMBOL_REF_P(X) \
795 ((GET_CODE (X) == SYMBOL_REF \
796 && alpha_linkage_symbol_p (XSTR (X, 0))) \
797 || (GET_CODE (X) == CONST \
798 && GET_CODE (XEXP (X, 0)) == PLUS \
799 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
800 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
801 #endif
802
803 /* legitimate_address_p recognizes an RTL expression that is a valid
804 memory address for an instruction. The MODE argument is the
805 machine mode for the MEM expression that wants to use this address.
806
807 For Alpha, we have either a constant address or the sum of a
808 register and a constant address, or just a register. For DImode,
809 any of those forms can be surrounded with an AND that clear the
810 low-order three bits; this is an "unaligned" access. */
811
812 static bool
813 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
814 {
815 /* If this is an ldq_u type address, discard the outer AND. */
816 if (mode == DImode
817 && GET_CODE (x) == AND
818 && CONST_INT_P (XEXP (x, 1))
819 && INTVAL (XEXP (x, 1)) == -8)
820 x = XEXP (x, 0);
821
822 /* Discard non-paradoxical subregs. */
823 if (GET_CODE (x) == SUBREG
824 && (GET_MODE_SIZE (GET_MODE (x))
825 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
826 x = SUBREG_REG (x);
827
828 /* Unadorned general registers are valid. */
829 if (REG_P (x)
830 && (strict
831 ? STRICT_REG_OK_FOR_BASE_P (x)
832 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
833 return true;
834
835 /* Constant addresses (i.e. +/- 32k) are valid. */
836 if (CONSTANT_ADDRESS_P (x))
837 return true;
838
839 #if TARGET_ABI_OPEN_VMS
840 if (LINKAGE_SYMBOL_REF_P (x))
841 return true;
842 #endif
843
844 /* Register plus a small constant offset is valid. */
845 if (GET_CODE (x) == PLUS)
846 {
847 rtx ofs = XEXP (x, 1);
848 x = XEXP (x, 0);
849
850 /* Discard non-paradoxical subregs. */
851 if (GET_CODE (x) == SUBREG
852 && (GET_MODE_SIZE (GET_MODE (x))
853 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
854 x = SUBREG_REG (x);
855
856 if (REG_P (x))
857 {
858 if (! strict
859 && NONSTRICT_REG_OK_FP_BASE_P (x)
860 && CONST_INT_P (ofs))
861 return true;
862 if ((strict
863 ? STRICT_REG_OK_FOR_BASE_P (x)
864 : NONSTRICT_REG_OK_FOR_BASE_P (x))
865 && CONSTANT_ADDRESS_P (ofs))
866 return true;
867 }
868 }
869
870 /* If we're managing explicit relocations, LO_SUM is valid, as are small
871 data symbols. Avoid explicit relocations of modes larger than word
872 mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */
873 else if (TARGET_EXPLICIT_RELOCS
874 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
875 {
876 if (small_symbolic_operand (x, Pmode))
877 return true;
878
879 if (GET_CODE (x) == LO_SUM)
880 {
881 rtx ofs = XEXP (x, 1);
882 x = XEXP (x, 0);
883
884 /* Discard non-paradoxical subregs. */
885 if (GET_CODE (x) == SUBREG
886 && (GET_MODE_SIZE (GET_MODE (x))
887 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
888 x = SUBREG_REG (x);
889
890 /* Must have a valid base register. */
891 if (! (REG_P (x)
892 && (strict
893 ? STRICT_REG_OK_FOR_BASE_P (x)
894 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
895 return false;
896
897 /* The symbol must be local. */
898 if (local_symbolic_operand (ofs, Pmode)
899 || dtp32_symbolic_operand (ofs, Pmode)
900 || tp32_symbolic_operand (ofs, Pmode))
901 return true;
902 }
903 }
904
905 return false;
906 }
907
908 /* Build the SYMBOL_REF for __tls_get_addr. */
909
910 static GTY(()) rtx tls_get_addr_libfunc;
911
912 static rtx
913 get_tls_get_addr (void)
914 {
915 if (!tls_get_addr_libfunc)
916 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
917 return tls_get_addr_libfunc;
918 }
919
920 /* Try machine-dependent ways of modifying an illegitimate address
921 to be legitimate. If we find one, return the new, valid address. */
922
923 static rtx
924 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
925 {
926 HOST_WIDE_INT addend;
927
928 /* If the address is (plus reg const_int) and the CONST_INT is not a
929 valid offset, compute the high part of the constant and add it to
930 the register. Then our address is (plus temp low-part-const). */
931 if (GET_CODE (x) == PLUS
932 && REG_P (XEXP (x, 0))
933 && CONST_INT_P (XEXP (x, 1))
934 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
935 {
936 addend = INTVAL (XEXP (x, 1));
937 x = XEXP (x, 0);
938 goto split_addend;
939 }
940
941 /* If the address is (const (plus FOO const_int)), find the low-order
942 part of the CONST_INT. Then load FOO plus any high-order part of the
943 CONST_INT into a register. Our address is (plus reg low-part-const).
944 This is done to reduce the number of GOT entries. */
945 if (can_create_pseudo_p ()
946 && GET_CODE (x) == CONST
947 && GET_CODE (XEXP (x, 0)) == PLUS
948 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
949 {
950 addend = INTVAL (XEXP (XEXP (x, 0), 1));
951 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
952 goto split_addend;
953 }
954
955 /* If we have a (plus reg const), emit the load as in (2), then add
956 the two registers, and finally generate (plus reg low-part-const) as
957 our address. */
958 if (can_create_pseudo_p ()
959 && GET_CODE (x) == PLUS
960 && REG_P (XEXP (x, 0))
961 && GET_CODE (XEXP (x, 1)) == CONST
962 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
963 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
964 {
965 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
966 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
967 XEXP (XEXP (XEXP (x, 1), 0), 0),
968 NULL_RTX, 1, OPTAB_LIB_WIDEN);
969 goto split_addend;
970 }
971
972 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
973 Avoid modes larger than word mode since i.e. $LC0+8($1) can fold
974 around +/- 32k offset. */
975 if (TARGET_EXPLICIT_RELOCS
976 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
977 && symbolic_operand (x, Pmode))
978 {
979 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
980
981 switch (tls_symbolic_operand_type (x))
982 {
983 case TLS_MODEL_NONE:
984 break;
985
986 case TLS_MODEL_GLOBAL_DYNAMIC:
987 start_sequence ();
988
989 r0 = gen_rtx_REG (Pmode, 0);
990 r16 = gen_rtx_REG (Pmode, 16);
991 tga = get_tls_get_addr ();
992 dest = gen_reg_rtx (Pmode);
993 seq = GEN_INT (alpha_next_sequence_number++);
994
995 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
996 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
997 insn = emit_call_insn (insn);
998 RTL_CONST_CALL_P (insn) = 1;
999 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1000
1001 insn = get_insns ();
1002 end_sequence ();
1003
1004 emit_libcall_block (insn, dest, r0, x);
1005 return dest;
1006
1007 case TLS_MODEL_LOCAL_DYNAMIC:
1008 start_sequence ();
1009
1010 r0 = gen_rtx_REG (Pmode, 0);
1011 r16 = gen_rtx_REG (Pmode, 16);
1012 tga = get_tls_get_addr ();
1013 scratch = gen_reg_rtx (Pmode);
1014 seq = GEN_INT (alpha_next_sequence_number++);
1015
1016 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1017 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1018 insn = emit_call_insn (insn);
1019 RTL_CONST_CALL_P (insn) = 1;
1020 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1021
1022 insn = get_insns ();
1023 end_sequence ();
1024
1025 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1026 UNSPEC_TLSLDM_CALL);
1027 emit_libcall_block (insn, scratch, r0, eqv);
1028
1029 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1030 eqv = gen_rtx_CONST (Pmode, eqv);
1031
1032 if (alpha_tls_size == 64)
1033 {
1034 dest = gen_reg_rtx (Pmode);
1035 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1036 emit_insn (gen_adddi3 (dest, dest, scratch));
1037 return dest;
1038 }
1039 if (alpha_tls_size == 32)
1040 {
1041 insn = gen_rtx_HIGH (Pmode, eqv);
1042 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1043 scratch = gen_reg_rtx (Pmode);
1044 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1045 }
1046 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1047
1048 case TLS_MODEL_INITIAL_EXEC:
1049 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1050 eqv = gen_rtx_CONST (Pmode, eqv);
1051 tp = gen_reg_rtx (Pmode);
1052 scratch = gen_reg_rtx (Pmode);
1053 dest = gen_reg_rtx (Pmode);
1054
1055 emit_insn (gen_load_tp (tp));
1056 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1057 emit_insn (gen_adddi3 (dest, tp, scratch));
1058 return dest;
1059
1060 case TLS_MODEL_LOCAL_EXEC:
1061 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1062 eqv = gen_rtx_CONST (Pmode, eqv);
1063 tp = gen_reg_rtx (Pmode);
1064
1065 emit_insn (gen_load_tp (tp));
1066 if (alpha_tls_size == 32)
1067 {
1068 insn = gen_rtx_HIGH (Pmode, eqv);
1069 insn = gen_rtx_PLUS (Pmode, tp, insn);
1070 tp = gen_reg_rtx (Pmode);
1071 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1072 }
1073 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1074
1075 default:
1076 gcc_unreachable ();
1077 }
1078
1079 if (local_symbolic_operand (x, Pmode))
1080 {
1081 if (small_symbolic_operand (x, Pmode))
1082 return x;
1083 else
1084 {
1085 if (can_create_pseudo_p ())
1086 scratch = gen_reg_rtx (Pmode);
1087 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1088 gen_rtx_HIGH (Pmode, x)));
1089 return gen_rtx_LO_SUM (Pmode, scratch, x);
1090 }
1091 }
1092 }
1093
1094 return NULL;
1095
1096 split_addend:
1097 {
1098 HOST_WIDE_INT low, high;
1099
1100 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1101 addend -= low;
1102 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1103 addend -= high;
1104
1105 if (addend)
1106 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1107 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1108 1, OPTAB_LIB_WIDEN);
1109 if (high)
1110 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1111 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1112 1, OPTAB_LIB_WIDEN);
1113
1114 return plus_constant (x, low);
1115 }
1116 }
1117
1118
1119 /* Try machine-dependent ways of modifying an illegitimate address
1120 to be legitimate. Return X or the new, valid address. */
1121
1122 static rtx
1123 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1124 enum machine_mode mode)
1125 {
1126 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1127 return new_x ? new_x : x;
1128 }
1129
1130 /* Primarily this is required for TLS symbols, but given that our move
1131 patterns *ought* to be able to handle any symbol at any time, we
1132 should never be spilling symbolic operands to the constant pool, ever. */
1133
1134 static bool
1135 alpha_cannot_force_const_mem (rtx x)
1136 {
1137 enum rtx_code code = GET_CODE (x);
1138 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1139 }
1140
1141 /* We do not allow indirect calls to be optimized into sibling calls, nor
1142 can we allow a call to a function with a different GP to be optimized
1143 into a sibcall. */
1144
1145 static bool
1146 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1147 {
1148 /* Can't do indirect tail calls, since we don't know if the target
1149 uses the same GP. */
1150 if (!decl)
1151 return false;
1152
1153 /* Otherwise, we can make a tail call if the target function shares
1154 the same GP. */
1155 return decl_has_samegp (decl);
1156 }
1157
1158 int
1159 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1160 {
1161 rtx x = *px;
1162
1163 /* Don't re-split. */
1164 if (GET_CODE (x) == LO_SUM)
1165 return -1;
1166
1167 return small_symbolic_operand (x, Pmode) != 0;
1168 }
1169
1170 static int
1171 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1172 {
1173 rtx x = *px;
1174
1175 /* Don't re-split. */
1176 if (GET_CODE (x) == LO_SUM)
1177 return -1;
1178
1179 if (small_symbolic_operand (x, Pmode))
1180 {
1181 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1182 *px = x;
1183 return -1;
1184 }
1185
1186 return 0;
1187 }
1188
1189 rtx
1190 split_small_symbolic_operand (rtx x)
1191 {
1192 x = copy_insn (x);
1193 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1194 return x;
1195 }
1196
1197 /* Indicate that INSN cannot be duplicated. This is true for any insn
1198 that we've marked with gpdisp relocs, since those have to stay in
1199 1-1 correspondence with one another.
1200
1201 Technically we could copy them if we could set up a mapping from one
1202 sequence number to another, across the set of insns to be duplicated.
1203 This seems overly complicated and error-prone since interblock motion
1204 from sched-ebb could move one of the pair of insns to a different block.
1205
1206 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1207 then they'll be in a different block from their ldgp. Which could lead
1208 the bb reorder code to think that it would be ok to copy just the block
1209 containing the call and branch to the block containing the ldgp. */
1210
1211 static bool
1212 alpha_cannot_copy_insn_p (rtx insn)
1213 {
1214 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1215 return false;
1216 if (recog_memoized (insn) >= 0)
1217 return get_attr_cannot_copy (insn);
1218 else
1219 return false;
1220 }
1221
1222
1223 /* Try a machine-dependent way of reloading an illegitimate address
1224 operand. If we find one, push the reload and return the new rtx. */
1225
1226 rtx
1227 alpha_legitimize_reload_address (rtx x,
1228 enum machine_mode mode ATTRIBUTE_UNUSED,
1229 int opnum, int type,
1230 int ind_levels ATTRIBUTE_UNUSED)
1231 {
1232 /* We must recognize output that we have already generated ourselves. */
1233 if (GET_CODE (x) == PLUS
1234 && GET_CODE (XEXP (x, 0)) == PLUS
1235 && REG_P (XEXP (XEXP (x, 0), 0))
1236 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1237 && CONST_INT_P (XEXP (x, 1)))
1238 {
1239 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1240 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1241 opnum, (enum reload_type) type);
1242 return x;
1243 }
1244
1245 /* We wish to handle large displacements off a base register by
1246 splitting the addend across an ldah and the mem insn. This
1247 cuts number of extra insns needed from 3 to 1. */
1248 if (GET_CODE (x) == PLUS
1249 && REG_P (XEXP (x, 0))
1250 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1251 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1252 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1253 {
1254 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1255 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1256 HOST_WIDE_INT high
1257 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1258
1259 /* Check for 32-bit overflow. */
1260 if (high + low != val)
1261 return NULL_RTX;
1262
1263 /* Reload the high part into a base reg; leave the low part
1264 in the mem directly. */
1265 x = gen_rtx_PLUS (GET_MODE (x),
1266 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1267 GEN_INT (high)),
1268 GEN_INT (low));
1269
1270 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1271 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1272 opnum, (enum reload_type) type);
1273 return x;
1274 }
1275
1276 return NULL_RTX;
1277 }
1278 \f
1279 /* Compute a (partial) cost for rtx X. Return true if the complete
1280 cost has been computed, and false if subexpressions should be
1281 scanned. In either case, *TOTAL contains the cost result. */
1282
1283 static bool
1284 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1285 bool speed)
1286 {
1287 enum machine_mode mode = GET_MODE (x);
1288 bool float_mode_p = FLOAT_MODE_P (mode);
1289 const struct alpha_rtx_cost_data *cost_data;
1290
1291 if (!speed)
1292 cost_data = &alpha_rtx_cost_size;
1293 else
1294 cost_data = &alpha_rtx_cost_data[alpha_tune];
1295
1296 switch (code)
1297 {
1298 case CONST_INT:
1299 /* If this is an 8-bit constant, return zero since it can be used
1300 nearly anywhere with no cost. If it is a valid operand for an
1301 ADD or AND, likewise return 0 if we know it will be used in that
1302 context. Otherwise, return 2 since it might be used there later.
1303 All other constants take at least two insns. */
1304 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1305 {
1306 *total = 0;
1307 return true;
1308 }
1309 /* FALLTHRU */
1310
1311 case CONST_DOUBLE:
1312 if (x == CONST0_RTX (mode))
1313 *total = 0;
1314 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1315 || (outer_code == AND && and_operand (x, VOIDmode)))
1316 *total = 0;
1317 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1318 *total = 2;
1319 else
1320 *total = COSTS_N_INSNS (2);
1321 return true;
1322
1323 case CONST:
1324 case SYMBOL_REF:
1325 case LABEL_REF:
1326 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1327 *total = COSTS_N_INSNS (outer_code != MEM);
1328 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1329 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1330 else if (tls_symbolic_operand_type (x))
1331 /* Estimate of cost for call_pal rduniq. */
1332 /* ??? How many insns do we emit here? More than one... */
1333 *total = COSTS_N_INSNS (15);
1334 else
1335 /* Otherwise we do a load from the GOT. */
1336 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1337 return true;
1338
1339 case HIGH:
1340 /* This is effectively an add_operand. */
1341 *total = 2;
1342 return true;
1343
1344 case PLUS:
1345 case MINUS:
1346 if (float_mode_p)
1347 *total = cost_data->fp_add;
1348 else if (GET_CODE (XEXP (x, 0)) == MULT
1349 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1350 {
1351 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1352 (enum rtx_code) outer_code, speed)
1353 + rtx_cost (XEXP (x, 1),
1354 (enum rtx_code) outer_code, speed)
1355 + COSTS_N_INSNS (1));
1356 return true;
1357 }
1358 return false;
1359
1360 case MULT:
1361 if (float_mode_p)
1362 *total = cost_data->fp_mult;
1363 else if (mode == DImode)
1364 *total = cost_data->int_mult_di;
1365 else
1366 *total = cost_data->int_mult_si;
1367 return false;
1368
1369 case ASHIFT:
1370 if (CONST_INT_P (XEXP (x, 1))
1371 && INTVAL (XEXP (x, 1)) <= 3)
1372 {
1373 *total = COSTS_N_INSNS (1);
1374 return false;
1375 }
1376 /* FALLTHRU */
1377
1378 case ASHIFTRT:
1379 case LSHIFTRT:
1380 *total = cost_data->int_shift;
1381 return false;
1382
1383 case IF_THEN_ELSE:
1384 if (float_mode_p)
1385 *total = cost_data->fp_add;
1386 else
1387 *total = cost_data->int_cmov;
1388 return false;
1389
1390 case DIV:
1391 case UDIV:
1392 case MOD:
1393 case UMOD:
1394 if (!float_mode_p)
1395 *total = cost_data->int_div;
1396 else if (mode == SFmode)
1397 *total = cost_data->fp_div_sf;
1398 else
1399 *total = cost_data->fp_div_df;
1400 return false;
1401
1402 case MEM:
1403 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1404 return true;
1405
1406 case NEG:
1407 if (! float_mode_p)
1408 {
1409 *total = COSTS_N_INSNS (1);
1410 return false;
1411 }
1412 /* FALLTHRU */
1413
1414 case ABS:
1415 if (! float_mode_p)
1416 {
1417 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1418 return false;
1419 }
1420 /* FALLTHRU */
1421
1422 case FLOAT:
1423 case UNSIGNED_FLOAT:
1424 case FIX:
1425 case UNSIGNED_FIX:
1426 case FLOAT_TRUNCATE:
1427 *total = cost_data->fp_add;
1428 return false;
1429
1430 case FLOAT_EXTEND:
1431 if (MEM_P (XEXP (x, 0)))
1432 *total = 0;
1433 else
1434 *total = cost_data->fp_add;
1435 return false;
1436
1437 default:
1438 return false;
1439 }
1440 }
1441 \f
1442 /* REF is an alignable memory location. Place an aligned SImode
1443 reference into *PALIGNED_MEM and the number of bits to shift into
1444 *PBITNUM. SCRATCH is a free register for use in reloading out
1445 of range stack slots. */
1446
1447 void
1448 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1449 {
1450 rtx base;
1451 HOST_WIDE_INT disp, offset;
1452
1453 gcc_assert (MEM_P (ref));
1454
1455 if (reload_in_progress
1456 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1457 {
1458 base = find_replacement (&XEXP (ref, 0));
1459 gcc_assert (memory_address_p (GET_MODE (ref), base));
1460 }
1461 else
1462 base = XEXP (ref, 0);
1463
1464 if (GET_CODE (base) == PLUS)
1465 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1466 else
1467 disp = 0;
1468
1469 /* Find the byte offset within an aligned word. If the memory itself is
1470 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1471 will have examined the base register and determined it is aligned, and
1472 thus displacements from it are naturally alignable. */
1473 if (MEM_ALIGN (ref) >= 32)
1474 offset = 0;
1475 else
1476 offset = disp & 3;
1477
1478 /* Access the entire aligned word. */
1479 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1480
1481 /* Convert the byte offset within the word to a bit offset. */
1482 if (WORDS_BIG_ENDIAN)
1483 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1484 else
1485 offset *= 8;
1486 *pbitnum = GEN_INT (offset);
1487 }
1488
1489 /* Similar, but just get the address. Handle the two reload cases.
1490 Add EXTRA_OFFSET to the address we return. */
1491
1492 rtx
1493 get_unaligned_address (rtx ref)
1494 {
1495 rtx base;
1496 HOST_WIDE_INT offset = 0;
1497
1498 gcc_assert (MEM_P (ref));
1499
1500 if (reload_in_progress
1501 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1502 {
1503 base = find_replacement (&XEXP (ref, 0));
1504
1505 gcc_assert (memory_address_p (GET_MODE (ref), base));
1506 }
1507 else
1508 base = XEXP (ref, 0);
1509
1510 if (GET_CODE (base) == PLUS)
1511 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1512
1513 return plus_constant (base, offset);
1514 }
1515
1516 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1517 X is always returned in a register. */
1518
1519 rtx
1520 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1521 {
1522 if (GET_CODE (addr) == PLUS)
1523 {
1524 ofs += INTVAL (XEXP (addr, 1));
1525 addr = XEXP (addr, 0);
1526 }
1527
1528 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1529 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1530 }
1531
1532 /* On the Alpha, all (non-symbolic) constants except zero go into
1533 a floating-point register via memory. Note that we cannot
1534 return anything that is not a subset of RCLASS, and that some
1535 symbolic constants cannot be dropped to memory. */
1536
1537 enum reg_class
1538 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1539 {
1540 /* Zero is present in any register class. */
1541 if (x == CONST0_RTX (GET_MODE (x)))
1542 return rclass;
1543
1544 /* These sorts of constants we can easily drop to memory. */
1545 if (CONST_INT_P (x)
1546 || GET_CODE (x) == CONST_DOUBLE
1547 || GET_CODE (x) == CONST_VECTOR)
1548 {
1549 if (rclass == FLOAT_REGS)
1550 return NO_REGS;
1551 if (rclass == ALL_REGS)
1552 return GENERAL_REGS;
1553 return rclass;
1554 }
1555
1556 /* All other kinds of constants should not (and in the case of HIGH
1557 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1558 secondary reload. */
1559 if (CONSTANT_P (x))
1560 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1561
1562 return rclass;
1563 }
1564
1565 /* Inform reload about cases where moving X with a mode MODE to a register in
1566 RCLASS requires an extra scratch or immediate register. Return the class
1567 needed for the immediate register. */
1568
1569 static enum reg_class
1570 alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
1571 enum machine_mode mode, secondary_reload_info *sri)
1572 {
1573 /* Loading and storing HImode or QImode values to and from memory
1574 usually requires a scratch register. */
1575 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1576 {
1577 if (any_memory_operand (x, mode))
1578 {
1579 if (in_p)
1580 {
1581 if (!aligned_memory_operand (x, mode))
1582 sri->icode = reload_in_optab[mode];
1583 }
1584 else
1585 sri->icode = reload_out_optab[mode];
1586 return NO_REGS;
1587 }
1588 }
1589
1590 /* We also cannot do integral arithmetic into FP regs, as might result
1591 from register elimination into a DImode fp register. */
1592 if (rclass == FLOAT_REGS)
1593 {
1594 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1595 return GENERAL_REGS;
1596 if (in_p && INTEGRAL_MODE_P (mode)
1597 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1598 return GENERAL_REGS;
1599 }
1600
1601 return NO_REGS;
1602 }
1603 \f
1604 /* Subfunction of the following function. Update the flags of any MEM
1605 found in part of X. */
1606
1607 static int
1608 alpha_set_memflags_1 (rtx *xp, void *data)
1609 {
1610 rtx x = *xp, orig = (rtx) data;
1611
1612 if (!MEM_P (x))
1613 return 0;
1614
1615 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1616 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1617 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1618 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1619 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1620
1621 /* Sadly, we cannot use alias sets because the extra aliasing
1622 produced by the AND interferes. Given that two-byte quantities
1623 are the only thing we would be able to differentiate anyway,
1624 there does not seem to be any point in convoluting the early
1625 out of the alias check. */
1626
1627 return -1;
1628 }
1629
1630 /* Given SEQ, which is an INSN list, look for any MEMs in either
1631 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1632 volatile flags from REF into each of the MEMs found. If REF is not
1633 a MEM, don't do anything. */
1634
1635 void
1636 alpha_set_memflags (rtx seq, rtx ref)
1637 {
1638 rtx insn;
1639
1640 if (!MEM_P (ref))
1641 return;
1642
1643 /* This is only called from alpha.md, after having had something
1644 generated from one of the insn patterns. So if everything is
1645 zero, the pattern is already up-to-date. */
1646 if (!MEM_VOLATILE_P (ref)
1647 && !MEM_IN_STRUCT_P (ref)
1648 && !MEM_SCALAR_P (ref)
1649 && !MEM_NOTRAP_P (ref)
1650 && !MEM_READONLY_P (ref))
1651 return;
1652
1653 for (insn = seq; insn; insn = NEXT_INSN (insn))
1654 if (INSN_P (insn))
1655 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1656 else
1657 gcc_unreachable ();
1658 }
1659 \f
1660 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1661 int, bool);
1662
1663 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1664 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1665 and return pc_rtx if successful. */
1666
1667 static rtx
1668 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1669 HOST_WIDE_INT c, int n, bool no_output)
1670 {
1671 HOST_WIDE_INT new_const;
1672 int i, bits;
1673 /* Use a pseudo if highly optimizing and still generating RTL. */
1674 rtx subtarget
1675 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1676 rtx temp, insn;
1677
1678 /* If this is a sign-extended 32-bit constant, we can do this in at most
1679 three insns, so do it if we have enough insns left. We always have
1680 a sign-extended 32-bit constant when compiling on a narrow machine. */
1681
1682 if (HOST_BITS_PER_WIDE_INT != 64
1683 || c >> 31 == -1 || c >> 31 == 0)
1684 {
1685 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1686 HOST_WIDE_INT tmp1 = c - low;
1687 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1688 HOST_WIDE_INT extra = 0;
1689
1690 /* If HIGH will be interpreted as negative but the constant is
1691 positive, we must adjust it to do two ldha insns. */
1692
1693 if ((high & 0x8000) != 0 && c >= 0)
1694 {
1695 extra = 0x4000;
1696 tmp1 -= 0x40000000;
1697 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1698 }
1699
1700 if (c == low || (low == 0 && extra == 0))
1701 {
1702 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1703 but that meant that we can't handle INT_MIN on 32-bit machines
1704 (like NT/Alpha), because we recurse indefinitely through
1705 emit_move_insn to gen_movdi. So instead, since we know exactly
1706 what we want, create it explicitly. */
1707
1708 if (no_output)
1709 return pc_rtx;
1710 if (target == NULL)
1711 target = gen_reg_rtx (mode);
1712 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1713 return target;
1714 }
1715 else if (n >= 2 + (extra != 0))
1716 {
1717 if (no_output)
1718 return pc_rtx;
1719 if (!can_create_pseudo_p ())
1720 {
1721 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1722 temp = target;
1723 }
1724 else
1725 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1726 subtarget, mode);
1727
1728 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1729 This means that if we go through expand_binop, we'll try to
1730 generate extensions, etc, which will require new pseudos, which
1731 will fail during some split phases. The SImode add patterns
1732 still exist, but are not named. So build the insns by hand. */
1733
1734 if (extra != 0)
1735 {
1736 if (! subtarget)
1737 subtarget = gen_reg_rtx (mode);
1738 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1739 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1740 emit_insn (insn);
1741 temp = subtarget;
1742 }
1743
1744 if (target == NULL)
1745 target = gen_reg_rtx (mode);
1746 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1747 insn = gen_rtx_SET (VOIDmode, target, insn);
1748 emit_insn (insn);
1749 return target;
1750 }
1751 }
1752
1753 /* If we couldn't do it that way, try some other methods. But if we have
1754 no instructions left, don't bother. Likewise, if this is SImode and
1755 we can't make pseudos, we can't do anything since the expand_binop
1756 and expand_unop calls will widen and try to make pseudos. */
1757
1758 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1759 return 0;
1760
1761 /* Next, see if we can load a related constant and then shift and possibly
1762 negate it to get the constant we want. Try this once each increasing
1763 numbers of insns. */
1764
1765 for (i = 1; i < n; i++)
1766 {
1767 /* First, see if minus some low bits, we've an easy load of
1768 high bits. */
1769
1770 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1771 if (new_const != 0)
1772 {
1773 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1774 if (temp)
1775 {
1776 if (no_output)
1777 return temp;
1778 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1779 target, 0, OPTAB_WIDEN);
1780 }
1781 }
1782
1783 /* Next try complementing. */
1784 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1785 if (temp)
1786 {
1787 if (no_output)
1788 return temp;
1789 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1790 }
1791
1792 /* Next try to form a constant and do a left shift. We can do this
1793 if some low-order bits are zero; the exact_log2 call below tells
1794 us that information. The bits we are shifting out could be any
1795 value, but here we'll just try the 0- and sign-extended forms of
1796 the constant. To try to increase the chance of having the same
1797 constant in more than one insn, start at the highest number of
1798 bits to shift, but try all possibilities in case a ZAPNOT will
1799 be useful. */
1800
1801 bits = exact_log2 (c & -c);
1802 if (bits > 0)
1803 for (; bits > 0; bits--)
1804 {
1805 new_const = c >> bits;
1806 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1807 if (!temp && c < 0)
1808 {
1809 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1810 temp = alpha_emit_set_const (subtarget, mode, new_const,
1811 i, no_output);
1812 }
1813 if (temp)
1814 {
1815 if (no_output)
1816 return temp;
1817 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1818 target, 0, OPTAB_WIDEN);
1819 }
1820 }
1821
1822 /* Now try high-order zero bits. Here we try the shifted-in bits as
1823 all zero and all ones. Be careful to avoid shifting outside the
1824 mode and to avoid shifting outside the host wide int size. */
1825 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1826 confuse the recursive call and set all of the high 32 bits. */
1827
1828 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1829 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1830 if (bits > 0)
1831 for (; bits > 0; bits--)
1832 {
1833 new_const = c << bits;
1834 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1835 if (!temp)
1836 {
1837 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1838 temp = alpha_emit_set_const (subtarget, mode, new_const,
1839 i, no_output);
1840 }
1841 if (temp)
1842 {
1843 if (no_output)
1844 return temp;
1845 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1846 target, 1, OPTAB_WIDEN);
1847 }
1848 }
1849
1850 /* Now try high-order 1 bits. We get that with a sign-extension.
1851 But one bit isn't enough here. Be careful to avoid shifting outside
1852 the mode and to avoid shifting outside the host wide int size. */
1853
1854 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1855 - floor_log2 (~ c) - 2);
1856 if (bits > 0)
1857 for (; bits > 0; bits--)
1858 {
1859 new_const = c << bits;
1860 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1861 if (!temp)
1862 {
1863 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1864 temp = alpha_emit_set_const (subtarget, mode, new_const,
1865 i, no_output);
1866 }
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1872 target, 0, OPTAB_WIDEN);
1873 }
1874 }
1875 }
1876
1877 #if HOST_BITS_PER_WIDE_INT == 64
1878 /* Finally, see if can load a value into the target that is the same as the
1879 constant except that all bytes that are 0 are changed to be 0xff. If we
1880 can, then we can do a ZAPNOT to obtain the desired constant. */
1881
1882 new_const = c;
1883 for (i = 0; i < 64; i += 8)
1884 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1885 new_const |= (HOST_WIDE_INT) 0xff << i;
1886
1887 /* We are only called for SImode and DImode. If this is SImode, ensure that
1888 we are sign extended to a full word. */
1889
1890 if (mode == SImode)
1891 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1892
1893 if (new_const != c)
1894 {
1895 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1896 if (temp)
1897 {
1898 if (no_output)
1899 return temp;
1900 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1901 target, 0, OPTAB_WIDEN);
1902 }
1903 }
1904 #endif
1905
1906 return 0;
1907 }
1908
1909 /* Try to output insns to set TARGET equal to the constant C if it can be
1910 done in less than N insns. Do all computations in MODE. Returns the place
1911 where the output has been placed if it can be done and the insns have been
1912 emitted. If it would take more than N insns, zero is returned and no
1913 insns and emitted. */
1914
1915 static rtx
1916 alpha_emit_set_const (rtx target, enum machine_mode mode,
1917 HOST_WIDE_INT c, int n, bool no_output)
1918 {
1919 enum machine_mode orig_mode = mode;
1920 rtx orig_target = target;
1921 rtx result = 0;
1922 int i;
1923
1924 /* If we can't make any pseudos, TARGET is an SImode hard register, we
1925 can't load this constant in one insn, do this in DImode. */
1926 if (!can_create_pseudo_p () && mode == SImode
1927 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1928 {
1929 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1930 if (result)
1931 return result;
1932
1933 target = no_output ? NULL : gen_lowpart (DImode, target);
1934 mode = DImode;
1935 }
1936 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1937 {
1938 target = no_output ? NULL : gen_lowpart (DImode, target);
1939 mode = DImode;
1940 }
1941
1942 /* Try 1 insn, then 2, then up to N. */
1943 for (i = 1; i <= n; i++)
1944 {
1945 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1946 if (result)
1947 {
1948 rtx insn, set;
1949
1950 if (no_output)
1951 return result;
1952
1953 insn = get_last_insn ();
1954 set = single_set (insn);
1955 if (! CONSTANT_P (SET_SRC (set)))
1956 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1957 break;
1958 }
1959 }
1960
1961 /* Allow for the case where we changed the mode of TARGET. */
1962 if (result)
1963 {
1964 if (result == target)
1965 result = orig_target;
1966 else if (mode != orig_mode)
1967 result = gen_lowpart (orig_mode, result);
1968 }
1969
1970 return result;
1971 }
1972
1973 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1974 fall back to a straight forward decomposition. We do this to avoid
1975 exponential run times encountered when looking for longer sequences
1976 with alpha_emit_set_const. */
1977
1978 static rtx
1979 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1980 {
1981 HOST_WIDE_INT d1, d2, d3, d4;
1982
1983 /* Decompose the entire word */
1984 #if HOST_BITS_PER_WIDE_INT >= 64
1985 gcc_assert (c2 == -(c1 < 0));
1986 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1987 c1 -= d1;
1988 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1989 c1 = (c1 - d2) >> 32;
1990 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1991 c1 -= d3;
1992 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1993 gcc_assert (c1 == d4);
1994 #else
1995 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1996 c1 -= d1;
1997 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1998 gcc_assert (c1 == d2);
1999 c2 += (d2 < 0);
2000 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
2001 c2 -= d3;
2002 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
2003 gcc_assert (c2 == d4);
2004 #endif
2005
2006 /* Construct the high word */
2007 if (d4)
2008 {
2009 emit_move_insn (target, GEN_INT (d4));
2010 if (d3)
2011 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2012 }
2013 else
2014 emit_move_insn (target, GEN_INT (d3));
2015
2016 /* Shift it into place */
2017 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2018
2019 /* Add in the low bits. */
2020 if (d2)
2021 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2022 if (d1)
2023 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2024
2025 return target;
2026 }
2027
2028 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2029 the low 64 bits. */
2030
2031 static void
2032 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2033 {
2034 HOST_WIDE_INT i0, i1;
2035
2036 if (GET_CODE (x) == CONST_VECTOR)
2037 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2038
2039
2040 if (CONST_INT_P (x))
2041 {
2042 i0 = INTVAL (x);
2043 i1 = -(i0 < 0);
2044 }
2045 else if (HOST_BITS_PER_WIDE_INT >= 64)
2046 {
2047 i0 = CONST_DOUBLE_LOW (x);
2048 i1 = -(i0 < 0);
2049 }
2050 else
2051 {
2052 i0 = CONST_DOUBLE_LOW (x);
2053 i1 = CONST_DOUBLE_HIGH (x);
2054 }
2055
2056 *p0 = i0;
2057 *p1 = i1;
2058 }
2059
2060 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2061 are willing to load the value into a register via a move pattern.
2062 Normally this is all symbolic constants, integral constants that
2063 take three or fewer instructions, and floating-point zero. */
2064
2065 bool
2066 alpha_legitimate_constant_p (rtx x)
2067 {
2068 enum machine_mode mode = GET_MODE (x);
2069 HOST_WIDE_INT i0, i1;
2070
2071 switch (GET_CODE (x))
2072 {
2073 case LABEL_REF:
2074 case HIGH:
2075 return true;
2076
2077 case CONST:
2078 if (GET_CODE (XEXP (x, 0)) == PLUS
2079 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2080 x = XEXP (XEXP (x, 0), 0);
2081 else
2082 return true;
2083
2084 if (GET_CODE (x) != SYMBOL_REF)
2085 return true;
2086
2087 /* FALLTHRU */
2088
2089 case SYMBOL_REF:
2090 /* TLS symbols are never valid. */
2091 return SYMBOL_REF_TLS_MODEL (x) == 0;
2092
2093 case CONST_DOUBLE:
2094 if (x == CONST0_RTX (mode))
2095 return true;
2096 if (FLOAT_MODE_P (mode))
2097 return false;
2098 goto do_integer;
2099
2100 case CONST_VECTOR:
2101 if (x == CONST0_RTX (mode))
2102 return true;
2103 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2104 return false;
2105 if (GET_MODE_SIZE (mode) != 8)
2106 return false;
2107 goto do_integer;
2108
2109 case CONST_INT:
2110 do_integer:
2111 if (TARGET_BUILD_CONSTANTS)
2112 return true;
2113 alpha_extract_integer (x, &i0, &i1);
2114 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == (-i0 < 0))
2115 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2116 return false;
2117
2118 default:
2119 return false;
2120 }
2121 }
2122
2123 /* Operand 1 is known to be a constant, and should require more than one
2124 instruction to load. Emit that multi-part load. */
2125
2126 bool
2127 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2128 {
2129 HOST_WIDE_INT i0, i1;
2130 rtx temp = NULL_RTX;
2131
2132 alpha_extract_integer (operands[1], &i0, &i1);
2133
2134 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2135 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2136
2137 if (!temp && TARGET_BUILD_CONSTANTS)
2138 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2139
2140 if (temp)
2141 {
2142 if (!rtx_equal_p (operands[0], temp))
2143 emit_move_insn (operands[0], temp);
2144 return true;
2145 }
2146
2147 return false;
2148 }
2149
2150 /* Expand a move instruction; return true if all work is done.
2151 We don't handle non-bwx subword loads here. */
2152
2153 bool
2154 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2155 {
2156 rtx tmp;
2157
2158 /* If the output is not a register, the input must be. */
2159 if (MEM_P (operands[0])
2160 && ! reg_or_0_operand (operands[1], mode))
2161 operands[1] = force_reg (mode, operands[1]);
2162
2163 /* Allow legitimize_address to perform some simplifications. */
2164 if (mode == Pmode && symbolic_operand (operands[1], mode))
2165 {
2166 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2167 if (tmp)
2168 {
2169 if (tmp == operands[0])
2170 return true;
2171 operands[1] = tmp;
2172 return false;
2173 }
2174 }
2175
2176 /* Early out for non-constants and valid constants. */
2177 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2178 return false;
2179
2180 /* Split large integers. */
2181 if (CONST_INT_P (operands[1])
2182 || GET_CODE (operands[1]) == CONST_DOUBLE
2183 || GET_CODE (operands[1]) == CONST_VECTOR)
2184 {
2185 if (alpha_split_const_mov (mode, operands))
2186 return true;
2187 }
2188
2189 /* Otherwise we've nothing left but to drop the thing to memory. */
2190 tmp = force_const_mem (mode, operands[1]);
2191
2192 if (tmp == NULL_RTX)
2193 return false;
2194
2195 if (reload_in_progress)
2196 {
2197 emit_move_insn (operands[0], XEXP (tmp, 0));
2198 operands[1] = replace_equiv_address (tmp, operands[0]);
2199 }
2200 else
2201 operands[1] = validize_mem (tmp);
2202 return false;
2203 }
2204
2205 /* Expand a non-bwx QImode or HImode move instruction;
2206 return true if all work is done. */
2207
2208 bool
2209 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2210 {
2211 rtx seq;
2212
2213 /* If the output is not a register, the input must be. */
2214 if (MEM_P (operands[0]))
2215 operands[1] = force_reg (mode, operands[1]);
2216
2217 /* Handle four memory cases, unaligned and aligned for either the input
2218 or the output. The only case where we can be called during reload is
2219 for aligned loads; all other cases require temporaries. */
2220
2221 if (any_memory_operand (operands[1], mode))
2222 {
2223 if (aligned_memory_operand (operands[1], mode))
2224 {
2225 if (reload_in_progress)
2226 {
2227 if (mode == QImode)
2228 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2229 else
2230 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2231 emit_insn (seq);
2232 }
2233 else
2234 {
2235 rtx aligned_mem, bitnum;
2236 rtx scratch = gen_reg_rtx (SImode);
2237 rtx subtarget;
2238 bool copyout;
2239
2240 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2241
2242 subtarget = operands[0];
2243 if (REG_P (subtarget))
2244 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2245 else
2246 subtarget = gen_reg_rtx (DImode), copyout = true;
2247
2248 if (mode == QImode)
2249 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2250 bitnum, scratch);
2251 else
2252 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2253 bitnum, scratch);
2254 emit_insn (seq);
2255
2256 if (copyout)
2257 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2258 }
2259 }
2260 else
2261 {
2262 /* Don't pass these as parameters since that makes the generated
2263 code depend on parameter evaluation order which will cause
2264 bootstrap failures. */
2265
2266 rtx temp1, temp2, subtarget, ua;
2267 bool copyout;
2268
2269 temp1 = gen_reg_rtx (DImode);
2270 temp2 = gen_reg_rtx (DImode);
2271
2272 subtarget = operands[0];
2273 if (REG_P (subtarget))
2274 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2275 else
2276 subtarget = gen_reg_rtx (DImode), copyout = true;
2277
2278 ua = get_unaligned_address (operands[1]);
2279 if (mode == QImode)
2280 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2281 else
2282 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2283
2284 alpha_set_memflags (seq, operands[1]);
2285 emit_insn (seq);
2286
2287 if (copyout)
2288 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2289 }
2290 return true;
2291 }
2292
2293 if (any_memory_operand (operands[0], mode))
2294 {
2295 if (aligned_memory_operand (operands[0], mode))
2296 {
2297 rtx aligned_mem, bitnum;
2298 rtx temp1 = gen_reg_rtx (SImode);
2299 rtx temp2 = gen_reg_rtx (SImode);
2300
2301 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2302
2303 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2304 temp1, temp2));
2305 }
2306 else
2307 {
2308 rtx temp1 = gen_reg_rtx (DImode);
2309 rtx temp2 = gen_reg_rtx (DImode);
2310 rtx temp3 = gen_reg_rtx (DImode);
2311 rtx ua = get_unaligned_address (operands[0]);
2312
2313 if (mode == QImode)
2314 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2315 else
2316 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2317
2318 alpha_set_memflags (seq, operands[0]);
2319 emit_insn (seq);
2320 }
2321 return true;
2322 }
2323
2324 return false;
2325 }
2326
2327 /* Implement the movmisalign patterns. One of the operands is a memory
2328 that is not naturally aligned. Emit instructions to load it. */
2329
2330 void
2331 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2332 {
2333 /* Honor misaligned loads, for those we promised to do so. */
2334 if (MEM_P (operands[1]))
2335 {
2336 rtx tmp;
2337
2338 if (register_operand (operands[0], mode))
2339 tmp = operands[0];
2340 else
2341 tmp = gen_reg_rtx (mode);
2342
2343 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2344 if (tmp != operands[0])
2345 emit_move_insn (operands[0], tmp);
2346 }
2347 else if (MEM_P (operands[0]))
2348 {
2349 if (!reg_or_0_operand (operands[1], mode))
2350 operands[1] = force_reg (mode, operands[1]);
2351 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2352 }
2353 else
2354 gcc_unreachable ();
2355 }
2356
2357 /* Generate an unsigned DImode to FP conversion. This is the same code
2358 optabs would emit if we didn't have TFmode patterns.
2359
2360 For SFmode, this is the only construction I've found that can pass
2361 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2362 intermediates will work, because you'll get intermediate rounding
2363 that ruins the end result. Some of this could be fixed by turning
2364 on round-to-positive-infinity, but that requires diddling the fpsr,
2365 which kills performance. I tried turning this around and converting
2366 to a negative number, so that I could turn on /m, but either I did
2367 it wrong or there's something else cause I wound up with the exact
2368 same single-bit error. There is a branch-less form of this same code:
2369
2370 srl $16,1,$1
2371 and $16,1,$2
2372 cmplt $16,0,$3
2373 or $1,$2,$2
2374 cmovge $16,$16,$2
2375 itoft $3,$f10
2376 itoft $2,$f11
2377 cvtqs $f11,$f11
2378 adds $f11,$f11,$f0
2379 fcmoveq $f10,$f11,$f0
2380
2381 I'm not using it because it's the same number of instructions as
2382 this branch-full form, and it has more serialized long latency
2383 instructions on the critical path.
2384
2385 For DFmode, we can avoid rounding errors by breaking up the word
2386 into two pieces, converting them separately, and adding them back:
2387
2388 LC0: .long 0,0x5f800000
2389
2390 itoft $16,$f11
2391 lda $2,LC0
2392 cmplt $16,0,$1
2393 cpyse $f11,$f31,$f10
2394 cpyse $f31,$f11,$f11
2395 s4addq $1,$2,$1
2396 lds $f12,0($1)
2397 cvtqt $f10,$f10
2398 cvtqt $f11,$f11
2399 addt $f12,$f10,$f0
2400 addt $f0,$f11,$f0
2401
2402 This doesn't seem to be a clear-cut win over the optabs form.
2403 It probably all depends on the distribution of numbers being
2404 converted -- in the optabs form, all but high-bit-set has a
2405 much lower minimum execution time. */
2406
2407 void
2408 alpha_emit_floatuns (rtx operands[2])
2409 {
2410 rtx neglab, donelab, i0, i1, f0, in, out;
2411 enum machine_mode mode;
2412
2413 out = operands[0];
2414 in = force_reg (DImode, operands[1]);
2415 mode = GET_MODE (out);
2416 neglab = gen_label_rtx ();
2417 donelab = gen_label_rtx ();
2418 i0 = gen_reg_rtx (DImode);
2419 i1 = gen_reg_rtx (DImode);
2420 f0 = gen_reg_rtx (mode);
2421
2422 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2423
2424 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2425 emit_jump_insn (gen_jump (donelab));
2426 emit_barrier ();
2427
2428 emit_label (neglab);
2429
2430 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2431 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2432 emit_insn (gen_iordi3 (i0, i0, i1));
2433 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2434 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2435
2436 emit_label (donelab);
2437 }
2438
2439 /* Generate the comparison for a conditional branch. */
2440
2441 void
2442 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2443 {
2444 enum rtx_code cmp_code, branch_code;
2445 enum machine_mode branch_mode = VOIDmode;
2446 enum rtx_code code = GET_CODE (operands[0]);
2447 rtx op0 = operands[1], op1 = operands[2];
2448 rtx tem;
2449
2450 if (cmp_mode == TFmode)
2451 {
2452 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2453 op1 = const0_rtx;
2454 cmp_mode = DImode;
2455 }
2456
2457 /* The general case: fold the comparison code to the types of compares
2458 that we have, choosing the branch as necessary. */
2459 switch (code)
2460 {
2461 case EQ: case LE: case LT: case LEU: case LTU:
2462 case UNORDERED:
2463 /* We have these compares: */
2464 cmp_code = code, branch_code = NE;
2465 break;
2466
2467 case NE:
2468 case ORDERED:
2469 /* These must be reversed. */
2470 cmp_code = reverse_condition (code), branch_code = EQ;
2471 break;
2472
2473 case GE: case GT: case GEU: case GTU:
2474 /* For FP, we swap them, for INT, we reverse them. */
2475 if (cmp_mode == DFmode)
2476 {
2477 cmp_code = swap_condition (code);
2478 branch_code = NE;
2479 tem = op0, op0 = op1, op1 = tem;
2480 }
2481 else
2482 {
2483 cmp_code = reverse_condition (code);
2484 branch_code = EQ;
2485 }
2486 break;
2487
2488 default:
2489 gcc_unreachable ();
2490 }
2491
2492 if (cmp_mode == DFmode)
2493 {
2494 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2495 {
2496 /* When we are not as concerned about non-finite values, and we
2497 are comparing against zero, we can branch directly. */
2498 if (op1 == CONST0_RTX (DFmode))
2499 cmp_code = UNKNOWN, branch_code = code;
2500 else if (op0 == CONST0_RTX (DFmode))
2501 {
2502 /* Undo the swap we probably did just above. */
2503 tem = op0, op0 = op1, op1 = tem;
2504 branch_code = swap_condition (cmp_code);
2505 cmp_code = UNKNOWN;
2506 }
2507 }
2508 else
2509 {
2510 /* ??? We mark the branch mode to be CCmode to prevent the
2511 compare and branch from being combined, since the compare
2512 insn follows IEEE rules that the branch does not. */
2513 branch_mode = CCmode;
2514 }
2515 }
2516 else
2517 {
2518 /* The following optimizations are only for signed compares. */
2519 if (code != LEU && code != LTU && code != GEU && code != GTU)
2520 {
2521 /* Whee. Compare and branch against 0 directly. */
2522 if (op1 == const0_rtx)
2523 cmp_code = UNKNOWN, branch_code = code;
2524
2525 /* If the constants doesn't fit into an immediate, but can
2526 be generated by lda/ldah, we adjust the argument and
2527 compare against zero, so we can use beq/bne directly. */
2528 /* ??? Don't do this when comparing against symbols, otherwise
2529 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2530 be declared false out of hand (at least for non-weak). */
2531 else if (CONST_INT_P (op1)
2532 && (code == EQ || code == NE)
2533 && !(symbolic_operand (op0, VOIDmode)
2534 || (REG_P (op0) && REG_POINTER (op0))))
2535 {
2536 rtx n_op1 = GEN_INT (-INTVAL (op1));
2537
2538 if (! satisfies_constraint_I (op1)
2539 && (satisfies_constraint_K (n_op1)
2540 || satisfies_constraint_L (n_op1)))
2541 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2542 }
2543 }
2544
2545 if (!reg_or_0_operand (op0, DImode))
2546 op0 = force_reg (DImode, op0);
2547 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2548 op1 = force_reg (DImode, op1);
2549 }
2550
2551 /* Emit an initial compare instruction, if necessary. */
2552 tem = op0;
2553 if (cmp_code != UNKNOWN)
2554 {
2555 tem = gen_reg_rtx (cmp_mode);
2556 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2557 }
2558
2559 /* Emit the branch instruction. */
2560 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2561 gen_rtx_IF_THEN_ELSE (VOIDmode,
2562 gen_rtx_fmt_ee (branch_code,
2563 branch_mode, tem,
2564 CONST0_RTX (cmp_mode)),
2565 gen_rtx_LABEL_REF (VOIDmode,
2566 operands[3]),
2567 pc_rtx));
2568 emit_jump_insn (tem);
2569 }
2570
2571 /* Certain simplifications can be done to make invalid setcc operations
2572 valid. Return the final comparison, or NULL if we can't work. */
2573
2574 bool
2575 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2576 {
2577 enum rtx_code cmp_code;
2578 enum rtx_code code = GET_CODE (operands[1]);
2579 rtx op0 = operands[2], op1 = operands[3];
2580 rtx tmp;
2581
2582 if (cmp_mode == TFmode)
2583 {
2584 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2585 op1 = const0_rtx;
2586 cmp_mode = DImode;
2587 }
2588
2589 if (cmp_mode == DFmode && !TARGET_FIX)
2590 return 0;
2591
2592 /* The general case: fold the comparison code to the types of compares
2593 that we have, choosing the branch as necessary. */
2594
2595 cmp_code = UNKNOWN;
2596 switch (code)
2597 {
2598 case EQ: case LE: case LT: case LEU: case LTU:
2599 case UNORDERED:
2600 /* We have these compares. */
2601 if (cmp_mode == DFmode)
2602 cmp_code = code, code = NE;
2603 break;
2604
2605 case NE:
2606 if (cmp_mode == DImode && op1 == const0_rtx)
2607 break;
2608 /* FALLTHRU */
2609
2610 case ORDERED:
2611 cmp_code = reverse_condition (code);
2612 code = EQ;
2613 break;
2614
2615 case GE: case GT: case GEU: case GTU:
2616 /* These normally need swapping, but for integer zero we have
2617 special patterns that recognize swapped operands. */
2618 if (cmp_mode == DImode && op1 == const0_rtx)
2619 break;
2620 code = swap_condition (code);
2621 if (cmp_mode == DFmode)
2622 cmp_code = code, code = NE;
2623 tmp = op0, op0 = op1, op1 = tmp;
2624 break;
2625
2626 default:
2627 gcc_unreachable ();
2628 }
2629
2630 if (cmp_mode == DImode)
2631 {
2632 if (!register_operand (op0, DImode))
2633 op0 = force_reg (DImode, op0);
2634 if (!reg_or_8bit_operand (op1, DImode))
2635 op1 = force_reg (DImode, op1);
2636 }
2637
2638 /* Emit an initial compare instruction, if necessary. */
2639 if (cmp_code != UNKNOWN)
2640 {
2641 tmp = gen_reg_rtx (cmp_mode);
2642 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2643 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2644
2645 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2646 op1 = const0_rtx;
2647 }
2648
2649 /* Emit the setcc instruction. */
2650 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2651 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2652 return true;
2653 }
2654
2655
2656 /* Rewrite a comparison against zero CMP of the form
2657 (CODE (cc0) (const_int 0)) so it can be written validly in
2658 a conditional move (if_then_else CMP ...).
2659 If both of the operands that set cc0 are nonzero we must emit
2660 an insn to perform the compare (it can't be done within
2661 the conditional move). */
2662
2663 rtx
2664 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2665 {
2666 enum rtx_code code = GET_CODE (cmp);
2667 enum rtx_code cmov_code = NE;
2668 rtx op0 = XEXP (cmp, 0);
2669 rtx op1 = XEXP (cmp, 1);
2670 enum machine_mode cmp_mode
2671 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2672 enum machine_mode cmov_mode = VOIDmode;
2673 int local_fast_math = flag_unsafe_math_optimizations;
2674 rtx tem;
2675
2676 if (cmp_mode == TFmode)
2677 {
2678 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2679 op1 = const0_rtx;
2680 cmp_mode = DImode;
2681 }
2682
2683 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2684
2685 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2686 {
2687 enum rtx_code cmp_code;
2688
2689 if (! TARGET_FIX)
2690 return 0;
2691
2692 /* If we have fp<->int register move instructions, do a cmov by
2693 performing the comparison in fp registers, and move the
2694 zero/nonzero value to integer registers, where we can then
2695 use a normal cmov, or vice-versa. */
2696
2697 switch (code)
2698 {
2699 case EQ: case LE: case LT: case LEU: case LTU:
2700 /* We have these compares. */
2701 cmp_code = code, code = NE;
2702 break;
2703
2704 case NE:
2705 /* This must be reversed. */
2706 cmp_code = EQ, code = EQ;
2707 break;
2708
2709 case GE: case GT: case GEU: case GTU:
2710 /* These normally need swapping, but for integer zero we have
2711 special patterns that recognize swapped operands. */
2712 if (cmp_mode == DImode && op1 == const0_rtx)
2713 cmp_code = code, code = NE;
2714 else
2715 {
2716 cmp_code = swap_condition (code);
2717 code = NE;
2718 tem = op0, op0 = op1, op1 = tem;
2719 }
2720 break;
2721
2722 default:
2723 gcc_unreachable ();
2724 }
2725
2726 tem = gen_reg_rtx (cmp_mode);
2727 emit_insn (gen_rtx_SET (VOIDmode, tem,
2728 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2729 op0, op1)));
2730
2731 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2732 op0 = gen_lowpart (cmp_mode, tem);
2733 op1 = CONST0_RTX (cmp_mode);
2734 local_fast_math = 1;
2735 }
2736
2737 /* We may be able to use a conditional move directly.
2738 This avoids emitting spurious compares. */
2739 if (signed_comparison_operator (cmp, VOIDmode)
2740 && (cmp_mode == DImode || local_fast_math)
2741 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2742 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2743
2744 /* We can't put the comparison inside the conditional move;
2745 emit a compare instruction and put that inside the
2746 conditional move. Make sure we emit only comparisons we have;
2747 swap or reverse as necessary. */
2748
2749 if (!can_create_pseudo_p ())
2750 return NULL_RTX;
2751
2752 switch (code)
2753 {
2754 case EQ: case LE: case LT: case LEU: case LTU:
2755 /* We have these compares: */
2756 break;
2757
2758 case NE:
2759 /* This must be reversed. */
2760 code = reverse_condition (code);
2761 cmov_code = EQ;
2762 break;
2763
2764 case GE: case GT: case GEU: case GTU:
2765 /* These must be swapped. */
2766 if (op1 != CONST0_RTX (cmp_mode))
2767 {
2768 code = swap_condition (code);
2769 tem = op0, op0 = op1, op1 = tem;
2770 }
2771 break;
2772
2773 default:
2774 gcc_unreachable ();
2775 }
2776
2777 if (cmp_mode == DImode)
2778 {
2779 if (!reg_or_0_operand (op0, DImode))
2780 op0 = force_reg (DImode, op0);
2781 if (!reg_or_8bit_operand (op1, DImode))
2782 op1 = force_reg (DImode, op1);
2783 }
2784
2785 /* ??? We mark the branch mode to be CCmode to prevent the compare
2786 and cmov from being combined, since the compare insn follows IEEE
2787 rules that the cmov does not. */
2788 if (cmp_mode == DFmode && !local_fast_math)
2789 cmov_mode = CCmode;
2790
2791 tem = gen_reg_rtx (cmp_mode);
2792 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2793 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2794 }
2795
2796 /* Simplify a conditional move of two constants into a setcc with
2797 arithmetic. This is done with a splitter since combine would
2798 just undo the work if done during code generation. It also catches
2799 cases we wouldn't have before cse. */
2800
2801 int
2802 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2803 rtx t_rtx, rtx f_rtx)
2804 {
2805 HOST_WIDE_INT t, f, diff;
2806 enum machine_mode mode;
2807 rtx target, subtarget, tmp;
2808
2809 mode = GET_MODE (dest);
2810 t = INTVAL (t_rtx);
2811 f = INTVAL (f_rtx);
2812 diff = t - f;
2813
2814 if (((code == NE || code == EQ) && diff < 0)
2815 || (code == GE || code == GT))
2816 {
2817 code = reverse_condition (code);
2818 diff = t, t = f, f = diff;
2819 diff = t - f;
2820 }
2821
2822 subtarget = target = dest;
2823 if (mode != DImode)
2824 {
2825 target = gen_lowpart (DImode, dest);
2826 if (can_create_pseudo_p ())
2827 subtarget = gen_reg_rtx (DImode);
2828 else
2829 subtarget = target;
2830 }
2831 /* Below, we must be careful to use copy_rtx on target and subtarget
2832 in intermediate insns, as they may be a subreg rtx, which may not
2833 be shared. */
2834
2835 if (f == 0 && exact_log2 (diff) > 0
2836 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2837 viable over a longer latency cmove. On EV5, the E0 slot is a
2838 scarce resource, and on EV4 shift has the same latency as a cmove. */
2839 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2840 {
2841 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2842 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2843
2844 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2845 GEN_INT (exact_log2 (t)));
2846 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2847 }
2848 else if (f == 0 && t == -1)
2849 {
2850 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2851 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2852
2853 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2854 }
2855 else if (diff == 1 || diff == 4 || diff == 8)
2856 {
2857 rtx add_op;
2858
2859 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2860 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2861
2862 if (diff == 1)
2863 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2864 else
2865 {
2866 add_op = GEN_INT (f);
2867 if (sext_add_operand (add_op, mode))
2868 {
2869 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2870 GEN_INT (diff));
2871 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2872 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2873 }
2874 else
2875 return 0;
2876 }
2877 }
2878 else
2879 return 0;
2880
2881 return 1;
2882 }
2883 \f
2884 /* Look up the function X_floating library function name for the
2885 given operation. */
2886
2887 struct GTY(()) xfloating_op
2888 {
2889 const enum rtx_code code;
2890 const char *const GTY((skip)) osf_func;
2891 const char *const GTY((skip)) vms_func;
2892 rtx libcall;
2893 };
2894
2895 static GTY(()) struct xfloating_op xfloating_ops[] =
2896 {
2897 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2898 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2899 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2900 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2901 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2902 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2903 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2904 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2905 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2906 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2907 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2908 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2909 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2910 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2911 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2912 };
2913
2914 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2915 {
2916 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2917 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2918 };
2919
2920 static rtx
2921 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2922 {
2923 struct xfloating_op *ops = xfloating_ops;
2924 long n = ARRAY_SIZE (xfloating_ops);
2925 long i;
2926
2927 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2928
2929 /* How irritating. Nothing to key off for the main table. */
2930 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2931 {
2932 ops = vax_cvt_ops;
2933 n = ARRAY_SIZE (vax_cvt_ops);
2934 }
2935
2936 for (i = 0; i < n; ++i, ++ops)
2937 if (ops->code == code)
2938 {
2939 rtx func = ops->libcall;
2940 if (!func)
2941 {
2942 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2943 ? ops->vms_func : ops->osf_func);
2944 ops->libcall = func;
2945 }
2946 return func;
2947 }
2948
2949 gcc_unreachable ();
2950 }
2951
2952 /* Most X_floating operations take the rounding mode as an argument.
2953 Compute that here. */
2954
2955 static int
2956 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2957 enum alpha_fp_rounding_mode round)
2958 {
2959 int mode;
2960
2961 switch (round)
2962 {
2963 case ALPHA_FPRM_NORM:
2964 mode = 2;
2965 break;
2966 case ALPHA_FPRM_MINF:
2967 mode = 1;
2968 break;
2969 case ALPHA_FPRM_CHOP:
2970 mode = 0;
2971 break;
2972 case ALPHA_FPRM_DYN:
2973 mode = 4;
2974 break;
2975 default:
2976 gcc_unreachable ();
2977
2978 /* XXX For reference, round to +inf is mode = 3. */
2979 }
2980
2981 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2982 mode |= 0x10000;
2983
2984 return mode;
2985 }
2986
2987 /* Emit an X_floating library function call.
2988
2989 Note that these functions do not follow normal calling conventions:
2990 TFmode arguments are passed in two integer registers (as opposed to
2991 indirect); TFmode return values appear in R16+R17.
2992
2993 FUNC is the function to call.
2994 TARGET is where the output belongs.
2995 OPERANDS are the inputs.
2996 NOPERANDS is the count of inputs.
2997 EQUIV is the expression equivalent for the function.
2998 */
2999
3000 static void
3001 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
3002 int noperands, rtx equiv)
3003 {
3004 rtx usage = NULL_RTX, tmp, reg;
3005 int regno = 16, i;
3006
3007 start_sequence ();
3008
3009 for (i = 0; i < noperands; ++i)
3010 {
3011 switch (GET_MODE (operands[i]))
3012 {
3013 case TFmode:
3014 reg = gen_rtx_REG (TFmode, regno);
3015 regno += 2;
3016 break;
3017
3018 case DFmode:
3019 reg = gen_rtx_REG (DFmode, regno + 32);
3020 regno += 1;
3021 break;
3022
3023 case VOIDmode:
3024 gcc_assert (CONST_INT_P (operands[i]));
3025 /* FALLTHRU */
3026 case DImode:
3027 reg = gen_rtx_REG (DImode, regno);
3028 regno += 1;
3029 break;
3030
3031 default:
3032 gcc_unreachable ();
3033 }
3034
3035 emit_move_insn (reg, operands[i]);
3036 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3037 }
3038
3039 switch (GET_MODE (target))
3040 {
3041 case TFmode:
3042 reg = gen_rtx_REG (TFmode, 16);
3043 break;
3044 case DFmode:
3045 reg = gen_rtx_REG (DFmode, 32);
3046 break;
3047 case DImode:
3048 reg = gen_rtx_REG (DImode, 0);
3049 break;
3050 default:
3051 gcc_unreachable ();
3052 }
3053
3054 tmp = gen_rtx_MEM (QImode, func);
3055 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3056 const0_rtx, const0_rtx));
3057 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3058 RTL_CONST_CALL_P (tmp) = 1;
3059
3060 tmp = get_insns ();
3061 end_sequence ();
3062
3063 emit_libcall_block (tmp, target, reg, equiv);
3064 }
3065
3066 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3067
3068 void
3069 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3070 {
3071 rtx func;
3072 int mode;
3073 rtx out_operands[3];
3074
3075 func = alpha_lookup_xfloating_lib_func (code);
3076 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3077
3078 out_operands[0] = operands[1];
3079 out_operands[1] = operands[2];
3080 out_operands[2] = GEN_INT (mode);
3081 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3082 gen_rtx_fmt_ee (code, TFmode, operands[1],
3083 operands[2]));
3084 }
3085
3086 /* Emit an X_floating library function call for a comparison. */
3087
3088 static rtx
3089 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3090 {
3091 enum rtx_code cmp_code, res_code;
3092 rtx func, out, operands[2], note;
3093
3094 /* X_floating library comparison functions return
3095 -1 unordered
3096 0 false
3097 1 true
3098 Convert the compare against the raw return value. */
3099
3100 cmp_code = *pcode;
3101 switch (cmp_code)
3102 {
3103 case UNORDERED:
3104 cmp_code = EQ;
3105 res_code = LT;
3106 break;
3107 case ORDERED:
3108 cmp_code = EQ;
3109 res_code = GE;
3110 break;
3111 case NE:
3112 res_code = NE;
3113 break;
3114 case EQ:
3115 case LT:
3116 case GT:
3117 case LE:
3118 case GE:
3119 res_code = GT;
3120 break;
3121 default:
3122 gcc_unreachable ();
3123 }
3124 *pcode = res_code;
3125
3126 func = alpha_lookup_xfloating_lib_func (cmp_code);
3127
3128 operands[0] = op0;
3129 operands[1] = op1;
3130 out = gen_reg_rtx (DImode);
3131
3132 /* What's actually returned is -1,0,1, not a proper boolean value,
3133 so use an EXPR_LIST as with a generic libcall instead of a
3134 comparison type expression. */
3135 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3136 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3137 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3138 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3139
3140 return out;
3141 }
3142
3143 /* Emit an X_floating library function call for a conversion. */
3144
3145 void
3146 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3147 {
3148 int noperands = 1, mode;
3149 rtx out_operands[2];
3150 rtx func;
3151 enum rtx_code code = orig_code;
3152
3153 if (code == UNSIGNED_FIX)
3154 code = FIX;
3155
3156 func = alpha_lookup_xfloating_lib_func (code);
3157
3158 out_operands[0] = operands[1];
3159
3160 switch (code)
3161 {
3162 case FIX:
3163 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3164 out_operands[1] = GEN_INT (mode);
3165 noperands = 2;
3166 break;
3167 case FLOAT_TRUNCATE:
3168 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3169 out_operands[1] = GEN_INT (mode);
3170 noperands = 2;
3171 break;
3172 default:
3173 break;
3174 }
3175
3176 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3177 gen_rtx_fmt_e (orig_code,
3178 GET_MODE (operands[0]),
3179 operands[1]));
3180 }
3181
3182 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3183 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3184 guarantee that the sequence
3185 set (OP[0] OP[2])
3186 set (OP[1] OP[3])
3187 is valid. Naturally, output operand ordering is little-endian.
3188 This is used by *movtf_internal and *movti_internal. */
3189
3190 void
3191 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3192 bool fixup_overlap)
3193 {
3194 switch (GET_CODE (operands[1]))
3195 {
3196 case REG:
3197 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3198 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3199 break;
3200
3201 case MEM:
3202 operands[3] = adjust_address (operands[1], DImode, 8);
3203 operands[2] = adjust_address (operands[1], DImode, 0);
3204 break;
3205
3206 case CONST_INT:
3207 case CONST_DOUBLE:
3208 gcc_assert (operands[1] == CONST0_RTX (mode));
3209 operands[2] = operands[3] = const0_rtx;
3210 break;
3211
3212 default:
3213 gcc_unreachable ();
3214 }
3215
3216 switch (GET_CODE (operands[0]))
3217 {
3218 case REG:
3219 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3220 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3221 break;
3222
3223 case MEM:
3224 operands[1] = adjust_address (operands[0], DImode, 8);
3225 operands[0] = adjust_address (operands[0], DImode, 0);
3226 break;
3227
3228 default:
3229 gcc_unreachable ();
3230 }
3231
3232 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3233 {
3234 rtx tmp;
3235 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3236 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3237 }
3238 }
3239
3240 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3241 op2 is a register containing the sign bit, operation is the
3242 logical operation to be performed. */
3243
3244 void
3245 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3246 {
3247 rtx high_bit = operands[2];
3248 rtx scratch;
3249 int move;
3250
3251 alpha_split_tmode_pair (operands, TFmode, false);
3252
3253 /* Detect three flavors of operand overlap. */
3254 move = 1;
3255 if (rtx_equal_p (operands[0], operands[2]))
3256 move = 0;
3257 else if (rtx_equal_p (operands[1], operands[2]))
3258 {
3259 if (rtx_equal_p (operands[0], high_bit))
3260 move = 2;
3261 else
3262 move = -1;
3263 }
3264
3265 if (move < 0)
3266 emit_move_insn (operands[0], operands[2]);
3267
3268 /* ??? If the destination overlaps both source tf and high_bit, then
3269 assume source tf is dead in its entirety and use the other half
3270 for a scratch register. Otherwise "scratch" is just the proper
3271 destination register. */
3272 scratch = operands[move < 2 ? 1 : 3];
3273
3274 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3275
3276 if (move > 0)
3277 {
3278 emit_move_insn (operands[0], operands[2]);
3279 if (move > 1)
3280 emit_move_insn (operands[1], scratch);
3281 }
3282 }
3283 \f
3284 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3285 unaligned data:
3286
3287 unsigned: signed:
3288 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3289 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3290 lda r3,X(r11) lda r3,X+2(r11)
3291 extwl r1,r3,r1 extql r1,r3,r1
3292 extwh r2,r3,r2 extqh r2,r3,r2
3293 or r1.r2.r1 or r1,r2,r1
3294 sra r1,48,r1
3295
3296 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3297 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3298 lda r3,X(r11) lda r3,X(r11)
3299 extll r1,r3,r1 extll r1,r3,r1
3300 extlh r2,r3,r2 extlh r2,r3,r2
3301 or r1.r2.r1 addl r1,r2,r1
3302
3303 quad: ldq_u r1,X(r11)
3304 ldq_u r2,X+7(r11)
3305 lda r3,X(r11)
3306 extql r1,r3,r1
3307 extqh r2,r3,r2
3308 or r1.r2.r1
3309 */
3310
3311 void
3312 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3313 HOST_WIDE_INT ofs, int sign)
3314 {
3315 rtx meml, memh, addr, extl, exth, tmp, mema;
3316 enum machine_mode mode;
3317
3318 if (TARGET_BWX && size == 2)
3319 {
3320 meml = adjust_address (mem, QImode, ofs);
3321 memh = adjust_address (mem, QImode, ofs+1);
3322 if (BYTES_BIG_ENDIAN)
3323 tmp = meml, meml = memh, memh = tmp;
3324 extl = gen_reg_rtx (DImode);
3325 exth = gen_reg_rtx (DImode);
3326 emit_insn (gen_zero_extendqidi2 (extl, meml));
3327 emit_insn (gen_zero_extendqidi2 (exth, memh));
3328 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3329 NULL, 1, OPTAB_LIB_WIDEN);
3330 addr = expand_simple_binop (DImode, IOR, extl, exth,
3331 NULL, 1, OPTAB_LIB_WIDEN);
3332
3333 if (sign && GET_MODE (tgt) != HImode)
3334 {
3335 addr = gen_lowpart (HImode, addr);
3336 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3337 }
3338 else
3339 {
3340 if (GET_MODE (tgt) != DImode)
3341 addr = gen_lowpart (GET_MODE (tgt), addr);
3342 emit_move_insn (tgt, addr);
3343 }
3344 return;
3345 }
3346
3347 meml = gen_reg_rtx (DImode);
3348 memh = gen_reg_rtx (DImode);
3349 addr = gen_reg_rtx (DImode);
3350 extl = gen_reg_rtx (DImode);
3351 exth = gen_reg_rtx (DImode);
3352
3353 mema = XEXP (mem, 0);
3354 if (GET_CODE (mema) == LO_SUM)
3355 mema = force_reg (Pmode, mema);
3356
3357 /* AND addresses cannot be in any alias set, since they may implicitly
3358 alias surrounding code. Ideally we'd have some alias set that
3359 covered all types except those with alignment 8 or higher. */
3360
3361 tmp = change_address (mem, DImode,
3362 gen_rtx_AND (DImode,
3363 plus_constant (mema, ofs),
3364 GEN_INT (-8)));
3365 set_mem_alias_set (tmp, 0);
3366 emit_move_insn (meml, tmp);
3367
3368 tmp = change_address (mem, DImode,
3369 gen_rtx_AND (DImode,
3370 plus_constant (mema, ofs + size - 1),
3371 GEN_INT (-8)));
3372 set_mem_alias_set (tmp, 0);
3373 emit_move_insn (memh, tmp);
3374
3375 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3376 {
3377 emit_move_insn (addr, plus_constant (mema, -1));
3378
3379 emit_insn (gen_extqh_be (extl, meml, addr));
3380 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3381
3382 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3383 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3384 addr, 1, OPTAB_WIDEN);
3385 }
3386 else if (sign && size == 2)
3387 {
3388 emit_move_insn (addr, plus_constant (mema, ofs+2));
3389
3390 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3391 emit_insn (gen_extqh_le (exth, memh, addr));
3392
3393 /* We must use tgt here for the target. Alpha-vms port fails if we use
3394 addr for the target, because addr is marked as a pointer and combine
3395 knows that pointers are always sign-extended 32-bit values. */
3396 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3397 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3398 addr, 1, OPTAB_WIDEN);
3399 }
3400 else
3401 {
3402 if (WORDS_BIG_ENDIAN)
3403 {
3404 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3405 switch ((int) size)
3406 {
3407 case 2:
3408 emit_insn (gen_extwh_be (extl, meml, addr));
3409 mode = HImode;
3410 break;
3411
3412 case 4:
3413 emit_insn (gen_extlh_be (extl, meml, addr));
3414 mode = SImode;
3415 break;
3416
3417 case 8:
3418 emit_insn (gen_extqh_be (extl, meml, addr));
3419 mode = DImode;
3420 break;
3421
3422 default:
3423 gcc_unreachable ();
3424 }
3425 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3426 }
3427 else
3428 {
3429 emit_move_insn (addr, plus_constant (mema, ofs));
3430 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3431 switch ((int) size)
3432 {
3433 case 2:
3434 emit_insn (gen_extwh_le (exth, memh, addr));
3435 mode = HImode;
3436 break;
3437
3438 case 4:
3439 emit_insn (gen_extlh_le (exth, memh, addr));
3440 mode = SImode;
3441 break;
3442
3443 case 8:
3444 emit_insn (gen_extqh_le (exth, memh, addr));
3445 mode = DImode;
3446 break;
3447
3448 default:
3449 gcc_unreachable ();
3450 }
3451 }
3452
3453 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3454 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3455 sign, OPTAB_WIDEN);
3456 }
3457
3458 if (addr != tgt)
3459 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3460 }
3461
3462 /* Similarly, use ins and msk instructions to perform unaligned stores. */
3463
3464 void
3465 alpha_expand_unaligned_store (rtx dst, rtx src,
3466 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3467 {
3468 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3469
3470 if (TARGET_BWX && size == 2)
3471 {
3472 if (src != const0_rtx)
3473 {
3474 dstl = gen_lowpart (QImode, src);
3475 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3476 NULL, 1, OPTAB_LIB_WIDEN);
3477 dsth = gen_lowpart (QImode, dsth);
3478 }
3479 else
3480 dstl = dsth = const0_rtx;
3481
3482 meml = adjust_address (dst, QImode, ofs);
3483 memh = adjust_address (dst, QImode, ofs+1);
3484 if (BYTES_BIG_ENDIAN)
3485 addr = meml, meml = memh, memh = addr;
3486
3487 emit_move_insn (meml, dstl);
3488 emit_move_insn (memh, dsth);
3489 return;
3490 }
3491
3492 dstl = gen_reg_rtx (DImode);
3493 dsth = gen_reg_rtx (DImode);
3494 insl = gen_reg_rtx (DImode);
3495 insh = gen_reg_rtx (DImode);
3496
3497 dsta = XEXP (dst, 0);
3498 if (GET_CODE (dsta) == LO_SUM)
3499 dsta = force_reg (Pmode, dsta);
3500
3501 /* AND addresses cannot be in any alias set, since they may implicitly
3502 alias surrounding code. Ideally we'd have some alias set that
3503 covered all types except those with alignment 8 or higher. */
3504
3505 meml = change_address (dst, DImode,
3506 gen_rtx_AND (DImode,
3507 plus_constant (dsta, ofs),
3508 GEN_INT (-8)));
3509 set_mem_alias_set (meml, 0);
3510
3511 memh = change_address (dst, DImode,
3512 gen_rtx_AND (DImode,
3513 plus_constant (dsta, ofs + size - 1),
3514 GEN_INT (-8)));
3515 set_mem_alias_set (memh, 0);
3516
3517 emit_move_insn (dsth, memh);
3518 emit_move_insn (dstl, meml);
3519 if (WORDS_BIG_ENDIAN)
3520 {
3521 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3522
3523 if (src != const0_rtx)
3524 {
3525 switch ((int) size)
3526 {
3527 case 2:
3528 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3529 break;
3530 case 4:
3531 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3532 break;
3533 case 8:
3534 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3535 break;
3536 }
3537 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3538 GEN_INT (size*8), addr));
3539 }
3540
3541 switch ((int) size)
3542 {
3543 case 2:
3544 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3545 break;
3546 case 4:
3547 {
3548 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3549 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3550 break;
3551 }
3552 case 8:
3553 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3554 break;
3555 }
3556
3557 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3558 }
3559 else
3560 {
3561 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3562
3563 if (src != CONST0_RTX (GET_MODE (src)))
3564 {
3565 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3566 GEN_INT (size*8), addr));
3567
3568 switch ((int) size)
3569 {
3570 case 2:
3571 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3572 break;
3573 case 4:
3574 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3575 break;
3576 case 8:
3577 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3578 break;
3579 }
3580 }
3581
3582 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3583
3584 switch ((int) size)
3585 {
3586 case 2:
3587 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3588 break;
3589 case 4:
3590 {
3591 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3592 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3593 break;
3594 }
3595 case 8:
3596 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3597 break;
3598 }
3599 }
3600
3601 if (src != CONST0_RTX (GET_MODE (src)))
3602 {
3603 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3604 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3605 }
3606
3607 if (WORDS_BIG_ENDIAN)
3608 {
3609 emit_move_insn (meml, dstl);
3610 emit_move_insn (memh, dsth);
3611 }
3612 else
3613 {
3614 /* Must store high before low for degenerate case of aligned. */
3615 emit_move_insn (memh, dsth);
3616 emit_move_insn (meml, dstl);
3617 }
3618 }
3619
3620 /* The block move code tries to maximize speed by separating loads and
3621 stores at the expense of register pressure: we load all of the data
3622 before we store it back out. There are two secondary effects worth
3623 mentioning, that this speeds copying to/from aligned and unaligned
3624 buffers, and that it makes the code significantly easier to write. */
3625
3626 #define MAX_MOVE_WORDS 8
3627
3628 /* Load an integral number of consecutive unaligned quadwords. */
3629
3630 static void
3631 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3632 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3633 {
3634 rtx const im8 = GEN_INT (-8);
3635 rtx const i64 = GEN_INT (64);
3636 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3637 rtx sreg, areg, tmp, smema;
3638 HOST_WIDE_INT i;
3639
3640 smema = XEXP (smem, 0);
3641 if (GET_CODE (smema) == LO_SUM)
3642 smema = force_reg (Pmode, smema);
3643
3644 /* Generate all the tmp registers we need. */
3645 for (i = 0; i < words; ++i)
3646 {
3647 data_regs[i] = out_regs[i];
3648 ext_tmps[i] = gen_reg_rtx (DImode);
3649 }
3650 data_regs[words] = gen_reg_rtx (DImode);
3651
3652 if (ofs != 0)
3653 smem = adjust_address (smem, GET_MODE (smem), ofs);
3654
3655 /* Load up all of the source data. */
3656 for (i = 0; i < words; ++i)
3657 {
3658 tmp = change_address (smem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (smema, 8*i),
3661 im8));
3662 set_mem_alias_set (tmp, 0);
3663 emit_move_insn (data_regs[i], tmp);
3664 }
3665
3666 tmp = change_address (smem, DImode,
3667 gen_rtx_AND (DImode,
3668 plus_constant (smema, 8*words - 1),
3669 im8));
3670 set_mem_alias_set (tmp, 0);
3671 emit_move_insn (data_regs[words], tmp);
3672
3673 /* Extract the half-word fragments. Unfortunately DEC decided to make
3674 extxh with offset zero a noop instead of zeroing the register, so
3675 we must take care of that edge condition ourselves with cmov. */
3676
3677 sreg = copy_addr_to_reg (smema);
3678 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3679 1, OPTAB_WIDEN);
3680 if (WORDS_BIG_ENDIAN)
3681 emit_move_insn (sreg, plus_constant (sreg, 7));
3682 for (i = 0; i < words; ++i)
3683 {
3684 if (WORDS_BIG_ENDIAN)
3685 {
3686 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3687 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3688 }
3689 else
3690 {
3691 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3692 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3693 }
3694 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3695 gen_rtx_IF_THEN_ELSE (DImode,
3696 gen_rtx_EQ (DImode, areg,
3697 const0_rtx),
3698 const0_rtx, ext_tmps[i])));
3699 }
3700
3701 /* Merge the half-words into whole words. */
3702 for (i = 0; i < words; ++i)
3703 {
3704 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3705 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3706 }
3707 }
3708
3709 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3710 may be NULL to store zeros. */
3711
3712 static void
3713 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3714 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3715 {
3716 rtx const im8 = GEN_INT (-8);
3717 rtx const i64 = GEN_INT (64);
3718 rtx ins_tmps[MAX_MOVE_WORDS];
3719 rtx st_tmp_1, st_tmp_2, dreg;
3720 rtx st_addr_1, st_addr_2, dmema;
3721 HOST_WIDE_INT i;
3722
3723 dmema = XEXP (dmem, 0);
3724 if (GET_CODE (dmema) == LO_SUM)
3725 dmema = force_reg (Pmode, dmema);
3726
3727 /* Generate all the tmp registers we need. */
3728 if (data_regs != NULL)
3729 for (i = 0; i < words; ++i)
3730 ins_tmps[i] = gen_reg_rtx(DImode);
3731 st_tmp_1 = gen_reg_rtx(DImode);
3732 st_tmp_2 = gen_reg_rtx(DImode);
3733
3734 if (ofs != 0)
3735 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3736
3737 st_addr_2 = change_address (dmem, DImode,
3738 gen_rtx_AND (DImode,
3739 plus_constant (dmema, words*8 - 1),
3740 im8));
3741 set_mem_alias_set (st_addr_2, 0);
3742
3743 st_addr_1 = change_address (dmem, DImode,
3744 gen_rtx_AND (DImode, dmema, im8));
3745 set_mem_alias_set (st_addr_1, 0);
3746
3747 /* Load up the destination end bits. */
3748 emit_move_insn (st_tmp_2, st_addr_2);
3749 emit_move_insn (st_tmp_1, st_addr_1);
3750
3751 /* Shift the input data into place. */
3752 dreg = copy_addr_to_reg (dmema);
3753 if (WORDS_BIG_ENDIAN)
3754 emit_move_insn (dreg, plus_constant (dreg, 7));
3755 if (data_regs != NULL)
3756 {
3757 for (i = words-1; i >= 0; --i)
3758 {
3759 if (WORDS_BIG_ENDIAN)
3760 {
3761 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3762 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3763 }
3764 else
3765 {
3766 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3767 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3768 }
3769 }
3770 for (i = words-1; i > 0; --i)
3771 {
3772 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3773 ins_tmps[i-1], ins_tmps[i-1], 1,
3774 OPTAB_WIDEN);
3775 }
3776 }
3777
3778 /* Split and merge the ends with the destination data. */
3779 if (WORDS_BIG_ENDIAN)
3780 {
3781 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3782 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3783 }
3784 else
3785 {
3786 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3787 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3788 }
3789
3790 if (data_regs != NULL)
3791 {
3792 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3793 st_tmp_2, 1, OPTAB_WIDEN);
3794 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3795 st_tmp_1, 1, OPTAB_WIDEN);
3796 }
3797
3798 /* Store it all. */
3799 if (WORDS_BIG_ENDIAN)
3800 emit_move_insn (st_addr_1, st_tmp_1);
3801 else
3802 emit_move_insn (st_addr_2, st_tmp_2);
3803 for (i = words-1; i > 0; --i)
3804 {
3805 rtx tmp = change_address (dmem, DImode,
3806 gen_rtx_AND (DImode,
3807 plus_constant(dmema,
3808 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3809 im8));
3810 set_mem_alias_set (tmp, 0);
3811 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3812 }
3813 if (WORDS_BIG_ENDIAN)
3814 emit_move_insn (st_addr_2, st_tmp_2);
3815 else
3816 emit_move_insn (st_addr_1, st_tmp_1);
3817 }
3818
3819
3820 /* Expand string/block move operations.
3821
3822 operands[0] is the pointer to the destination.
3823 operands[1] is the pointer to the source.
3824 operands[2] is the number of bytes to move.
3825 operands[3] is the alignment. */
3826
3827 int
3828 alpha_expand_block_move (rtx operands[])
3829 {
3830 rtx bytes_rtx = operands[2];
3831 rtx align_rtx = operands[3];
3832 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3833 HOST_WIDE_INT bytes = orig_bytes;
3834 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3835 HOST_WIDE_INT dst_align = src_align;
3836 rtx orig_src = operands[1];
3837 rtx orig_dst = operands[0];
3838 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3839 rtx tmp;
3840 unsigned int i, words, ofs, nregs = 0;
3841
3842 if (orig_bytes <= 0)
3843 return 1;
3844 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3845 return 0;
3846
3847 /* Look for additional alignment information from recorded register info. */
3848
3849 tmp = XEXP (orig_src, 0);
3850 if (REG_P (tmp))
3851 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3852 else if (GET_CODE (tmp) == PLUS
3853 && REG_P (XEXP (tmp, 0))
3854 && CONST_INT_P (XEXP (tmp, 1)))
3855 {
3856 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3857 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3858
3859 if (a > src_align)
3860 {
3861 if (a >= 64 && c % 8 == 0)
3862 src_align = 64;
3863 else if (a >= 32 && c % 4 == 0)
3864 src_align = 32;
3865 else if (a >= 16 && c % 2 == 0)
3866 src_align = 16;
3867 }
3868 }
3869
3870 tmp = XEXP (orig_dst, 0);
3871 if (REG_P (tmp))
3872 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3873 else if (GET_CODE (tmp) == PLUS
3874 && REG_P (XEXP (tmp, 0))
3875 && CONST_INT_P (XEXP (tmp, 1)))
3876 {
3877 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3878 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3879
3880 if (a > dst_align)
3881 {
3882 if (a >= 64 && c % 8 == 0)
3883 dst_align = 64;
3884 else if (a >= 32 && c % 4 == 0)
3885 dst_align = 32;
3886 else if (a >= 16 && c % 2 == 0)
3887 dst_align = 16;
3888 }
3889 }
3890
3891 ofs = 0;
3892 if (src_align >= 64 && bytes >= 8)
3893 {
3894 words = bytes / 8;
3895
3896 for (i = 0; i < words; ++i)
3897 data_regs[nregs + i] = gen_reg_rtx (DImode);
3898
3899 for (i = 0; i < words; ++i)
3900 emit_move_insn (data_regs[nregs + i],
3901 adjust_address (orig_src, DImode, ofs + i * 8));
3902
3903 nregs += words;
3904 bytes -= words * 8;
3905 ofs += words * 8;
3906 }
3907
3908 if (src_align >= 32 && bytes >= 4)
3909 {
3910 words = bytes / 4;
3911
3912 for (i = 0; i < words; ++i)
3913 data_regs[nregs + i] = gen_reg_rtx (SImode);
3914
3915 for (i = 0; i < words; ++i)
3916 emit_move_insn (data_regs[nregs + i],
3917 adjust_address (orig_src, SImode, ofs + i * 4));
3918
3919 nregs += words;
3920 bytes -= words * 4;
3921 ofs += words * 4;
3922 }
3923
3924 if (bytes >= 8)
3925 {
3926 words = bytes / 8;
3927
3928 for (i = 0; i < words+1; ++i)
3929 data_regs[nregs + i] = gen_reg_rtx (DImode);
3930
3931 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3932 words, ofs);
3933
3934 nregs += words;
3935 bytes -= words * 8;
3936 ofs += words * 8;
3937 }
3938
3939 if (! TARGET_BWX && bytes >= 4)
3940 {
3941 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3942 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3943 bytes -= 4;
3944 ofs += 4;
3945 }
3946
3947 if (bytes >= 2)
3948 {
3949 if (src_align >= 16)
3950 {
3951 do {
3952 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3953 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3954 bytes -= 2;
3955 ofs += 2;
3956 } while (bytes >= 2);
3957 }
3958 else if (! TARGET_BWX)
3959 {
3960 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3961 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3962 bytes -= 2;
3963 ofs += 2;
3964 }
3965 }
3966
3967 while (bytes > 0)
3968 {
3969 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3970 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3971 bytes -= 1;
3972 ofs += 1;
3973 }
3974
3975 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3976
3977 /* Now save it back out again. */
3978
3979 i = 0, ofs = 0;
3980
3981 /* Write out the data in whatever chunks reading the source allowed. */
3982 if (dst_align >= 64)
3983 {
3984 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3985 {
3986 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3987 data_regs[i]);
3988 ofs += 8;
3989 i++;
3990 }
3991 }
3992
3993 if (dst_align >= 32)
3994 {
3995 /* If the source has remaining DImode regs, write them out in
3996 two pieces. */
3997 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3998 {
3999 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
4000 NULL_RTX, 1, OPTAB_WIDEN);
4001
4002 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4003 gen_lowpart (SImode, data_regs[i]));
4004 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
4005 gen_lowpart (SImode, tmp));
4006 ofs += 8;
4007 i++;
4008 }
4009
4010 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4011 {
4012 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
4013 data_regs[i]);
4014 ofs += 4;
4015 i++;
4016 }
4017 }
4018
4019 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
4020 {
4021 /* Write out a remaining block of words using unaligned methods. */
4022
4023 for (words = 1; i + words < nregs; words++)
4024 if (GET_MODE (data_regs[i + words]) != DImode)
4025 break;
4026
4027 if (words == 1)
4028 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4029 else
4030 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4031 words, ofs);
4032
4033 i += words;
4034 ofs += words * 8;
4035 }
4036
4037 /* Due to the above, this won't be aligned. */
4038 /* ??? If we have more than one of these, consider constructing full
4039 words in registers and using alpha_expand_unaligned_store_words. */
4040 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4041 {
4042 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4043 ofs += 4;
4044 i++;
4045 }
4046
4047 if (dst_align >= 16)
4048 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4049 {
4050 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4051 i++;
4052 ofs += 2;
4053 }
4054 else
4055 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4056 {
4057 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4058 i++;
4059 ofs += 2;
4060 }
4061
4062 /* The remainder must be byte copies. */
4063 while (i < nregs)
4064 {
4065 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4066 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4067 i++;
4068 ofs += 1;
4069 }
4070
4071 return 1;
4072 }
4073
4074 int
4075 alpha_expand_block_clear (rtx operands[])
4076 {
4077 rtx bytes_rtx = operands[1];
4078 rtx align_rtx = operands[3];
4079 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4080 HOST_WIDE_INT bytes = orig_bytes;
4081 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4082 HOST_WIDE_INT alignofs = 0;
4083 rtx orig_dst = operands[0];
4084 rtx tmp;
4085 int i, words, ofs = 0;
4086
4087 if (orig_bytes <= 0)
4088 return 1;
4089 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4090 return 0;
4091
4092 /* Look for stricter alignment. */
4093 tmp = XEXP (orig_dst, 0);
4094 if (REG_P (tmp))
4095 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4096 else if (GET_CODE (tmp) == PLUS
4097 && REG_P (XEXP (tmp, 0))
4098 && CONST_INT_P (XEXP (tmp, 1)))
4099 {
4100 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4101 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4102
4103 if (a > align)
4104 {
4105 if (a >= 64)
4106 align = a, alignofs = 8 - c % 8;
4107 else if (a >= 32)
4108 align = a, alignofs = 4 - c % 4;
4109 else if (a >= 16)
4110 align = a, alignofs = 2 - c % 2;
4111 }
4112 }
4113
4114 /* Handle an unaligned prefix first. */
4115
4116 if (alignofs > 0)
4117 {
4118 #if HOST_BITS_PER_WIDE_INT >= 64
4119 /* Given that alignofs is bounded by align, the only time BWX could
4120 generate three stores is for a 7 byte fill. Prefer two individual
4121 stores over a load/mask/store sequence. */
4122 if ((!TARGET_BWX || alignofs == 7)
4123 && align >= 32
4124 && !(alignofs == 4 && bytes >= 4))
4125 {
4126 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4127 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4128 rtx mem, tmp;
4129 HOST_WIDE_INT mask;
4130
4131 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4132 set_mem_alias_set (mem, 0);
4133
4134 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4135 if (bytes < alignofs)
4136 {
4137 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4138 ofs += bytes;
4139 bytes = 0;
4140 }
4141 else
4142 {
4143 bytes -= alignofs;
4144 ofs += alignofs;
4145 }
4146 alignofs = 0;
4147
4148 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4149 NULL_RTX, 1, OPTAB_WIDEN);
4150
4151 emit_move_insn (mem, tmp);
4152 }
4153 #endif
4154
4155 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4156 {
4157 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4158 bytes -= 1;
4159 ofs += 1;
4160 alignofs -= 1;
4161 }
4162 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4163 {
4164 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4165 bytes -= 2;
4166 ofs += 2;
4167 alignofs -= 2;
4168 }
4169 if (alignofs == 4 && bytes >= 4)
4170 {
4171 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4172 bytes -= 4;
4173 ofs += 4;
4174 alignofs = 0;
4175 }
4176
4177 /* If we've not used the extra lead alignment information by now,
4178 we won't be able to. Downgrade align to match what's left over. */
4179 if (alignofs > 0)
4180 {
4181 alignofs = alignofs & -alignofs;
4182 align = MIN (align, alignofs * BITS_PER_UNIT);
4183 }
4184 }
4185
4186 /* Handle a block of contiguous long-words. */
4187
4188 if (align >= 64 && bytes >= 8)
4189 {
4190 words = bytes / 8;
4191
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4194 const0_rtx);
4195
4196 bytes -= words * 8;
4197 ofs += words * 8;
4198 }
4199
4200 /* If the block is large and appropriately aligned, emit a single
4201 store followed by a sequence of stq_u insns. */
4202
4203 if (align >= 32 && bytes > 16)
4204 {
4205 rtx orig_dsta;
4206
4207 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4208 bytes -= 4;
4209 ofs += 4;
4210
4211 orig_dsta = XEXP (orig_dst, 0);
4212 if (GET_CODE (orig_dsta) == LO_SUM)
4213 orig_dsta = force_reg (Pmode, orig_dsta);
4214
4215 words = bytes / 8;
4216 for (i = 0; i < words; ++i)
4217 {
4218 rtx mem
4219 = change_address (orig_dst, DImode,
4220 gen_rtx_AND (DImode,
4221 plus_constant (orig_dsta, ofs + i*8),
4222 GEN_INT (-8)));
4223 set_mem_alias_set (mem, 0);
4224 emit_move_insn (mem, const0_rtx);
4225 }
4226
4227 /* Depending on the alignment, the first stq_u may have overlapped
4228 with the initial stl, which means that the last stq_u didn't
4229 write as much as it would appear. Leave those questionable bytes
4230 unaccounted for. */
4231 bytes -= words * 8 - 4;
4232 ofs += words * 8 - 4;
4233 }
4234
4235 /* Handle a smaller block of aligned words. */
4236
4237 if ((align >= 64 && bytes == 4)
4238 || (align == 32 && bytes >= 4))
4239 {
4240 words = bytes / 4;
4241
4242 for (i = 0; i < words; ++i)
4243 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4244 const0_rtx);
4245
4246 bytes -= words * 4;
4247 ofs += words * 4;
4248 }
4249
4250 /* An unaligned block uses stq_u stores for as many as possible. */
4251
4252 if (bytes >= 8)
4253 {
4254 words = bytes / 8;
4255
4256 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4257
4258 bytes -= words * 8;
4259 ofs += words * 8;
4260 }
4261
4262 /* Next clean up any trailing pieces. */
4263
4264 #if HOST_BITS_PER_WIDE_INT >= 64
4265 /* Count the number of bits in BYTES for which aligned stores could
4266 be emitted. */
4267 words = 0;
4268 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4269 if (bytes & i)
4270 words += 1;
4271
4272 /* If we have appropriate alignment (and it wouldn't take too many
4273 instructions otherwise), mask out the bytes we need. */
4274 if (TARGET_BWX ? words > 2 : bytes > 0)
4275 {
4276 if (align >= 64)
4277 {
4278 rtx mem, tmp;
4279 HOST_WIDE_INT mask;
4280
4281 mem = adjust_address (orig_dst, DImode, ofs);
4282 set_mem_alias_set (mem, 0);
4283
4284 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4285
4286 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4287 NULL_RTX, 1, OPTAB_WIDEN);
4288
4289 emit_move_insn (mem, tmp);
4290 return 1;
4291 }
4292 else if (align >= 32 && bytes < 4)
4293 {
4294 rtx mem, tmp;
4295 HOST_WIDE_INT mask;
4296
4297 mem = adjust_address (orig_dst, SImode, ofs);
4298 set_mem_alias_set (mem, 0);
4299
4300 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4301
4302 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4303 NULL_RTX, 1, OPTAB_WIDEN);
4304
4305 emit_move_insn (mem, tmp);
4306 return 1;
4307 }
4308 }
4309 #endif
4310
4311 if (!TARGET_BWX && bytes >= 4)
4312 {
4313 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4314 bytes -= 4;
4315 ofs += 4;
4316 }
4317
4318 if (bytes >= 2)
4319 {
4320 if (align >= 16)
4321 {
4322 do {
4323 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4324 const0_rtx);
4325 bytes -= 2;
4326 ofs += 2;
4327 } while (bytes >= 2);
4328 }
4329 else if (! TARGET_BWX)
4330 {
4331 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4332 bytes -= 2;
4333 ofs += 2;
4334 }
4335 }
4336
4337 while (bytes > 0)
4338 {
4339 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4340 bytes -= 1;
4341 ofs += 1;
4342 }
4343
4344 return 1;
4345 }
4346
4347 /* Returns a mask so that zap(x, value) == x & mask. */
4348
4349 rtx
4350 alpha_expand_zap_mask (HOST_WIDE_INT value)
4351 {
4352 rtx result;
4353 int i;
4354
4355 if (HOST_BITS_PER_WIDE_INT >= 64)
4356 {
4357 HOST_WIDE_INT mask = 0;
4358
4359 for (i = 7; i >= 0; --i)
4360 {
4361 mask <<= 8;
4362 if (!((value >> i) & 1))
4363 mask |= 0xff;
4364 }
4365
4366 result = gen_int_mode (mask, DImode);
4367 }
4368 else
4369 {
4370 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4371
4372 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4373
4374 for (i = 7; i >= 4; --i)
4375 {
4376 mask_hi <<= 8;
4377 if (!((value >> i) & 1))
4378 mask_hi |= 0xff;
4379 }
4380
4381 for (i = 3; i >= 0; --i)
4382 {
4383 mask_lo <<= 8;
4384 if (!((value >> i) & 1))
4385 mask_lo |= 0xff;
4386 }
4387
4388 result = immed_double_const (mask_lo, mask_hi, DImode);
4389 }
4390
4391 return result;
4392 }
4393
4394 void
4395 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4396 enum machine_mode mode,
4397 rtx op0, rtx op1, rtx op2)
4398 {
4399 op0 = gen_lowpart (mode, op0);
4400
4401 if (op1 == const0_rtx)
4402 op1 = CONST0_RTX (mode);
4403 else
4404 op1 = gen_lowpart (mode, op1);
4405
4406 if (op2 == const0_rtx)
4407 op2 = CONST0_RTX (mode);
4408 else
4409 op2 = gen_lowpart (mode, op2);
4410
4411 emit_insn ((*gen) (op0, op1, op2));
4412 }
4413
4414 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4415 COND is true. Mark the jump as unlikely to be taken. */
4416
4417 static void
4418 emit_unlikely_jump (rtx cond, rtx label)
4419 {
4420 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4421 rtx x;
4422
4423 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4424 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4425 add_reg_note (x, REG_BR_PROB, very_unlikely);
4426 }
4427
4428 /* A subroutine of the atomic operation splitters. Emit a load-locked
4429 instruction in MODE. */
4430
4431 static void
4432 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4433 {
4434 rtx (*fn) (rtx, rtx) = NULL;
4435 if (mode == SImode)
4436 fn = gen_load_locked_si;
4437 else if (mode == DImode)
4438 fn = gen_load_locked_di;
4439 emit_insn (fn (reg, mem));
4440 }
4441
4442 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4443 instruction in MODE. */
4444
4445 static void
4446 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4447 {
4448 rtx (*fn) (rtx, rtx, rtx) = NULL;
4449 if (mode == SImode)
4450 fn = gen_store_conditional_si;
4451 else if (mode == DImode)
4452 fn = gen_store_conditional_di;
4453 emit_insn (fn (res, mem, val));
4454 }
4455
4456 /* A subroutine of the atomic operation splitters. Emit an insxl
4457 instruction in MODE. */
4458
4459 static rtx
4460 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4461 {
4462 rtx ret = gen_reg_rtx (DImode);
4463 rtx (*fn) (rtx, rtx, rtx);
4464
4465 if (WORDS_BIG_ENDIAN)
4466 {
4467 if (mode == QImode)
4468 fn = gen_insbl_be;
4469 else
4470 fn = gen_inswl_be;
4471 }
4472 else
4473 {
4474 if (mode == QImode)
4475 fn = gen_insbl_le;
4476 else
4477 fn = gen_inswl_le;
4478 }
4479 /* The insbl and inswl patterns require a register operand. */
4480 op1 = force_reg (mode, op1);
4481 emit_insn (fn (ret, op1, op2));
4482
4483 return ret;
4484 }
4485
4486 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4487 to perform. MEM is the memory on which to operate. VAL is the second
4488 operand of the binary operator. BEFORE and AFTER are optional locations to
4489 return the value of MEM either before of after the operation. SCRATCH is
4490 a scratch register. */
4491
4492 void
4493 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4494 rtx before, rtx after, rtx scratch)
4495 {
4496 enum machine_mode mode = GET_MODE (mem);
4497 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4498
4499 emit_insn (gen_memory_barrier ());
4500
4501 label = gen_label_rtx ();
4502 emit_label (label);
4503 label = gen_rtx_LABEL_REF (DImode, label);
4504
4505 if (before == NULL)
4506 before = scratch;
4507 emit_load_locked (mode, before, mem);
4508
4509 if (code == NOT)
4510 {
4511 x = gen_rtx_AND (mode, before, val);
4512 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4513
4514 x = gen_rtx_NOT (mode, val);
4515 }
4516 else
4517 x = gen_rtx_fmt_ee (code, mode, before, val);
4518 if (after)
4519 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4520 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4521
4522 emit_store_conditional (mode, cond, mem, scratch);
4523
4524 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4525 emit_unlikely_jump (x, label);
4526
4527 emit_insn (gen_memory_barrier ());
4528 }
4529
4530 /* Expand a compare and swap operation. */
4531
4532 void
4533 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4534 rtx scratch)
4535 {
4536 enum machine_mode mode = GET_MODE (mem);
4537 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4538
4539 emit_insn (gen_memory_barrier ());
4540
4541 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4542 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4543 emit_label (XEXP (label1, 0));
4544
4545 emit_load_locked (mode, retval, mem);
4546
4547 x = gen_lowpart (DImode, retval);
4548 if (oldval == const0_rtx)
4549 x = gen_rtx_NE (DImode, x, const0_rtx);
4550 else
4551 {
4552 x = gen_rtx_EQ (DImode, x, oldval);
4553 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4554 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4555 }
4556 emit_unlikely_jump (x, label2);
4557
4558 emit_move_insn (scratch, newval);
4559 emit_store_conditional (mode, cond, mem, scratch);
4560
4561 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4562 emit_unlikely_jump (x, label1);
4563
4564 emit_insn (gen_memory_barrier ());
4565 emit_label (XEXP (label2, 0));
4566 }
4567
4568 void
4569 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4570 {
4571 enum machine_mode mode = GET_MODE (mem);
4572 rtx addr, align, wdst;
4573 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4574
4575 addr = force_reg (DImode, XEXP (mem, 0));
4576 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4577 NULL_RTX, 1, OPTAB_DIRECT);
4578
4579 oldval = convert_modes (DImode, mode, oldval, 1);
4580 newval = emit_insxl (mode, newval, addr);
4581
4582 wdst = gen_reg_rtx (DImode);
4583 if (mode == QImode)
4584 fn5 = gen_sync_compare_and_swapqi_1;
4585 else
4586 fn5 = gen_sync_compare_and_swaphi_1;
4587 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4588
4589 emit_move_insn (dst, gen_lowpart (mode, wdst));
4590 }
4591
4592 void
4593 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4594 rtx oldval, rtx newval, rtx align,
4595 rtx scratch, rtx cond)
4596 {
4597 rtx label1, label2, mem, width, mask, x;
4598
4599 mem = gen_rtx_MEM (DImode, align);
4600 MEM_VOLATILE_P (mem) = 1;
4601
4602 emit_insn (gen_memory_barrier ());
4603 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4604 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4605 emit_label (XEXP (label1, 0));
4606
4607 emit_load_locked (DImode, scratch, mem);
4608
4609 width = GEN_INT (GET_MODE_BITSIZE (mode));
4610 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4611 if (WORDS_BIG_ENDIAN)
4612 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4613 else
4614 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4615
4616 if (oldval == const0_rtx)
4617 x = gen_rtx_NE (DImode, dest, const0_rtx);
4618 else
4619 {
4620 x = gen_rtx_EQ (DImode, dest, oldval);
4621 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4622 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4623 }
4624 emit_unlikely_jump (x, label2);
4625
4626 if (WORDS_BIG_ENDIAN)
4627 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4628 else
4629 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4630 emit_insn (gen_iordi3 (scratch, scratch, newval));
4631
4632 emit_store_conditional (DImode, scratch, mem, scratch);
4633
4634 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4635 emit_unlikely_jump (x, label1);
4636
4637 emit_insn (gen_memory_barrier ());
4638 emit_label (XEXP (label2, 0));
4639 }
4640
4641 /* Expand an atomic exchange operation. */
4642
4643 void
4644 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4645 {
4646 enum machine_mode mode = GET_MODE (mem);
4647 rtx label, x, cond = gen_lowpart (DImode, scratch);
4648
4649 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4650 emit_label (XEXP (label, 0));
4651
4652 emit_load_locked (mode, retval, mem);
4653 emit_move_insn (scratch, val);
4654 emit_store_conditional (mode, cond, mem, scratch);
4655
4656 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4657 emit_unlikely_jump (x, label);
4658
4659 emit_insn (gen_memory_barrier ());
4660 }
4661
4662 void
4663 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4664 {
4665 enum machine_mode mode = GET_MODE (mem);
4666 rtx addr, align, wdst;
4667 rtx (*fn4) (rtx, rtx, rtx, rtx);
4668
4669 /* Force the address into a register. */
4670 addr = force_reg (DImode, XEXP (mem, 0));
4671
4672 /* Align it to a multiple of 8. */
4673 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4674 NULL_RTX, 1, OPTAB_DIRECT);
4675
4676 /* Insert val into the correct byte location within the word. */
4677 val = emit_insxl (mode, val, addr);
4678
4679 wdst = gen_reg_rtx (DImode);
4680 if (mode == QImode)
4681 fn4 = gen_sync_lock_test_and_setqi_1;
4682 else
4683 fn4 = gen_sync_lock_test_and_sethi_1;
4684 emit_insn (fn4 (wdst, addr, val, align));
4685
4686 emit_move_insn (dst, gen_lowpart (mode, wdst));
4687 }
4688
4689 void
4690 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4691 rtx val, rtx align, rtx scratch)
4692 {
4693 rtx label, mem, width, mask, x;
4694
4695 mem = gen_rtx_MEM (DImode, align);
4696 MEM_VOLATILE_P (mem) = 1;
4697
4698 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4699 emit_label (XEXP (label, 0));
4700
4701 emit_load_locked (DImode, scratch, mem);
4702
4703 width = GEN_INT (GET_MODE_BITSIZE (mode));
4704 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4705 if (WORDS_BIG_ENDIAN)
4706 {
4707 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4708 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4709 }
4710 else
4711 {
4712 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4713 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4714 }
4715 emit_insn (gen_iordi3 (scratch, scratch, val));
4716
4717 emit_store_conditional (DImode, scratch, mem, scratch);
4718
4719 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4720 emit_unlikely_jump (x, label);
4721
4722 emit_insn (gen_memory_barrier ());
4723 }
4724 \f
4725 /* Adjust the cost of a scheduling dependency. Return the new cost of
4726 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4727
4728 static int
4729 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4730 {
4731 enum attr_type insn_type, dep_insn_type;
4732
4733 /* If the dependence is an anti-dependence, there is no cost. For an
4734 output dependence, there is sometimes a cost, but it doesn't seem
4735 worth handling those few cases. */
4736 if (REG_NOTE_KIND (link) != 0)
4737 return cost;
4738
4739 /* If we can't recognize the insns, we can't really do anything. */
4740 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4741 return cost;
4742
4743 insn_type = get_attr_type (insn);
4744 dep_insn_type = get_attr_type (dep_insn);
4745
4746 /* Bring in the user-defined memory latency. */
4747 if (dep_insn_type == TYPE_ILD
4748 || dep_insn_type == TYPE_FLD
4749 || dep_insn_type == TYPE_LDSYM)
4750 cost += alpha_memory_latency-1;
4751
4752 /* Everything else handled in DFA bypasses now. */
4753
4754 return cost;
4755 }
4756
4757 /* The number of instructions that can be issued per cycle. */
4758
4759 static int
4760 alpha_issue_rate (void)
4761 {
4762 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4763 }
4764
4765 /* How many alternative schedules to try. This should be as wide as the
4766 scheduling freedom in the DFA, but no wider. Making this value too
4767 large results extra work for the scheduler.
4768
4769 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4770 alternative schedules. For EV5, we can choose between E0/E1 and
4771 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4772
4773 static int
4774 alpha_multipass_dfa_lookahead (void)
4775 {
4776 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4777 }
4778 \f
4779 /* Machine-specific function data. */
4780
4781 struct GTY(()) machine_function
4782 {
4783 /* For unicosmk. */
4784 /* List of call information words for calls from this function. */
4785 struct rtx_def *first_ciw;
4786 struct rtx_def *last_ciw;
4787 int ciw_count;
4788
4789 /* List of deferred case vectors. */
4790 struct rtx_def *addr_list;
4791
4792 /* For OSF. */
4793 const char *some_ld_name;
4794
4795 /* For TARGET_LD_BUGGY_LDGP. */
4796 struct rtx_def *gp_save_rtx;
4797
4798 /* For VMS condition handlers. */
4799 bool uses_condition_handler;
4800 };
4801
4802 /* How to allocate a 'struct machine_function'. */
4803
4804 static struct machine_function *
4805 alpha_init_machine_status (void)
4806 {
4807 return ((struct machine_function *)
4808 ggc_alloc_cleared (sizeof (struct machine_function)));
4809 }
4810
4811 /* Support for frame based VMS condition handlers. */
4812
4813 /* A VMS condition handler may be established for a function with a call to
4814 __builtin_establish_vms_condition_handler, and cancelled with a call to
4815 __builtin_revert_vms_condition_handler.
4816
4817 The VMS Condition Handling Facility knows about the existence of a handler
4818 from the procedure descriptor .handler field. As the VMS native compilers,
4819 we store the user specified handler's address at a fixed location in the
4820 stack frame and point the procedure descriptor at a common wrapper which
4821 fetches the real handler's address and issues an indirect call.
4822
4823 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4824
4825 We force the procedure kind to PT_STACK, and the fixed frame location is
4826 fp+8, just before the register save area. We use the handler_data field in
4827 the procedure descriptor to state the fp offset at which the installed
4828 handler address can be found. */
4829
4830 #define VMS_COND_HANDLER_FP_OFFSET 8
4831
4832 /* Expand code to store the currently installed user VMS condition handler
4833 into TARGET and install HANDLER as the new condition handler. */
4834
4835 void
4836 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4837 {
4838 rtx handler_slot_address
4839 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4840
4841 rtx handler_slot
4842 = gen_rtx_MEM (DImode, handler_slot_address);
4843
4844 emit_move_insn (target, handler_slot);
4845 emit_move_insn (handler_slot, handler);
4846
4847 /* Notify the start/prologue/epilogue emitters that the condition handler
4848 slot is needed. In addition to reserving the slot space, this will force
4849 the procedure kind to PT_STACK so ensure that the hard_frame_pointer_rtx
4850 use above is correct. */
4851 cfun->machine->uses_condition_handler = true;
4852 }
4853
4854 /* Expand code to store the current VMS condition handler into TARGET and
4855 nullify it. */
4856
4857 void
4858 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4859 {
4860 /* We implement this by establishing a null condition handler, with the tiny
4861 side effect of setting uses_condition_handler. This is a little bit
4862 pessimistic if no actual builtin_establish call is ever issued, which is
4863 not a real problem and expected never to happen anyway. */
4864
4865 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4866 }
4867
4868 /* Functions to save and restore alpha_return_addr_rtx. */
4869
4870 /* Start the ball rolling with RETURN_ADDR_RTX. */
4871
4872 rtx
4873 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4874 {
4875 if (count != 0)
4876 return const0_rtx;
4877
4878 return get_hard_reg_initial_val (Pmode, REG_RA);
4879 }
4880
4881 /* Return or create a memory slot containing the gp value for the current
4882 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4883
4884 rtx
4885 alpha_gp_save_rtx (void)
4886 {
4887 rtx seq, m = cfun->machine->gp_save_rtx;
4888
4889 if (m == NULL)
4890 {
4891 start_sequence ();
4892
4893 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4894 m = validize_mem (m);
4895 emit_move_insn (m, pic_offset_table_rtx);
4896
4897 seq = get_insns ();
4898 end_sequence ();
4899
4900 /* We used to simply emit the sequence after entry_of_function.
4901 However this breaks the CFG if the first instruction in the
4902 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4903 label. Emit the sequence properly on the edge. We are only
4904 invoked from dw2_build_landing_pads and finish_eh_generation
4905 will call commit_edge_insertions thanks to a kludge. */
4906 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4907
4908 cfun->machine->gp_save_rtx = m;
4909 }
4910
4911 return m;
4912 }
4913
4914 static int
4915 alpha_ra_ever_killed (void)
4916 {
4917 rtx top;
4918
4919 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4920 return (int)df_regs_ever_live_p (REG_RA);
4921
4922 push_topmost_sequence ();
4923 top = get_insns ();
4924 pop_topmost_sequence ();
4925
4926 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4927 }
4928
4929 \f
4930 /* Return the trap mode suffix applicable to the current
4931 instruction, or NULL. */
4932
4933 static const char *
4934 get_trap_mode_suffix (void)
4935 {
4936 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4937
4938 switch (s)
4939 {
4940 case TRAP_SUFFIX_NONE:
4941 return NULL;
4942
4943 case TRAP_SUFFIX_SU:
4944 if (alpha_fptm >= ALPHA_FPTM_SU)
4945 return "su";
4946 return NULL;
4947
4948 case TRAP_SUFFIX_SUI:
4949 if (alpha_fptm >= ALPHA_FPTM_SUI)
4950 return "sui";
4951 return NULL;
4952
4953 case TRAP_SUFFIX_V_SV:
4954 switch (alpha_fptm)
4955 {
4956 case ALPHA_FPTM_N:
4957 return NULL;
4958 case ALPHA_FPTM_U:
4959 return "v";
4960 case ALPHA_FPTM_SU:
4961 case ALPHA_FPTM_SUI:
4962 return "sv";
4963 default:
4964 gcc_unreachable ();
4965 }
4966
4967 case TRAP_SUFFIX_V_SV_SVI:
4968 switch (alpha_fptm)
4969 {
4970 case ALPHA_FPTM_N:
4971 return NULL;
4972 case ALPHA_FPTM_U:
4973 return "v";
4974 case ALPHA_FPTM_SU:
4975 return "sv";
4976 case ALPHA_FPTM_SUI:
4977 return "svi";
4978 default:
4979 gcc_unreachable ();
4980 }
4981 break;
4982
4983 case TRAP_SUFFIX_U_SU_SUI:
4984 switch (alpha_fptm)
4985 {
4986 case ALPHA_FPTM_N:
4987 return NULL;
4988 case ALPHA_FPTM_U:
4989 return "u";
4990 case ALPHA_FPTM_SU:
4991 return "su";
4992 case ALPHA_FPTM_SUI:
4993 return "sui";
4994 default:
4995 gcc_unreachable ();
4996 }
4997 break;
4998
4999 default:
5000 gcc_unreachable ();
5001 }
5002 gcc_unreachable ();
5003 }
5004
5005 /* Return the rounding mode suffix applicable to the current
5006 instruction, or NULL. */
5007
5008 static const char *
5009 get_round_mode_suffix (void)
5010 {
5011 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
5012
5013 switch (s)
5014 {
5015 case ROUND_SUFFIX_NONE:
5016 return NULL;
5017 case ROUND_SUFFIX_NORMAL:
5018 switch (alpha_fprm)
5019 {
5020 case ALPHA_FPRM_NORM:
5021 return NULL;
5022 case ALPHA_FPRM_MINF:
5023 return "m";
5024 case ALPHA_FPRM_CHOP:
5025 return "c";
5026 case ALPHA_FPRM_DYN:
5027 return "d";
5028 default:
5029 gcc_unreachable ();
5030 }
5031 break;
5032
5033 case ROUND_SUFFIX_C:
5034 return "c";
5035
5036 default:
5037 gcc_unreachable ();
5038 }
5039 gcc_unreachable ();
5040 }
5041
5042 /* Locate some local-dynamic symbol still in use by this function
5043 so that we can print its name in some movdi_er_tlsldm pattern. */
5044
5045 static int
5046 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
5047 {
5048 rtx x = *px;
5049
5050 if (GET_CODE (x) == SYMBOL_REF
5051 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
5052 {
5053 cfun->machine->some_ld_name = XSTR (x, 0);
5054 return 1;
5055 }
5056
5057 return 0;
5058 }
5059
5060 static const char *
5061 get_some_local_dynamic_name (void)
5062 {
5063 rtx insn;
5064
5065 if (cfun->machine->some_ld_name)
5066 return cfun->machine->some_ld_name;
5067
5068 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
5069 if (INSN_P (insn)
5070 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
5071 return cfun->machine->some_ld_name;
5072
5073 gcc_unreachable ();
5074 }
5075
5076 /* Print an operand. Recognize special options, documented below. */
5077
5078 void
5079 print_operand (FILE *file, rtx x, int code)
5080 {
5081 int i;
5082
5083 switch (code)
5084 {
5085 case '~':
5086 /* Print the assembler name of the current function. */
5087 assemble_name (file, alpha_fnname);
5088 break;
5089
5090 case '&':
5091 assemble_name (file, get_some_local_dynamic_name ());
5092 break;
5093
5094 case '/':
5095 {
5096 const char *trap = get_trap_mode_suffix ();
5097 const char *round = get_round_mode_suffix ();
5098
5099 if (trap || round)
5100 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5101 (trap ? trap : ""), (round ? round : ""));
5102 break;
5103 }
5104
5105 case ',':
5106 /* Generates single precision instruction suffix. */
5107 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5108 break;
5109
5110 case '-':
5111 /* Generates double precision instruction suffix. */
5112 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5113 break;
5114
5115 case '#':
5116 if (alpha_this_literal_sequence_number == 0)
5117 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5118 fprintf (file, "%d", alpha_this_literal_sequence_number);
5119 break;
5120
5121 case '*':
5122 if (alpha_this_gpdisp_sequence_number == 0)
5123 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5124 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5125 break;
5126
5127 case 'H':
5128 if (GET_CODE (x) == HIGH)
5129 output_addr_const (file, XEXP (x, 0));
5130 else
5131 output_operand_lossage ("invalid %%H value");
5132 break;
5133
5134 case 'J':
5135 {
5136 const char *lituse;
5137
5138 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5139 {
5140 x = XVECEXP (x, 0, 0);
5141 lituse = "lituse_tlsgd";
5142 }
5143 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5144 {
5145 x = XVECEXP (x, 0, 0);
5146 lituse = "lituse_tlsldm";
5147 }
5148 else if (CONST_INT_P (x))
5149 lituse = "lituse_jsr";
5150 else
5151 {
5152 output_operand_lossage ("invalid %%J value");
5153 break;
5154 }
5155
5156 if (x != const0_rtx)
5157 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5158 }
5159 break;
5160
5161 case 'j':
5162 {
5163 const char *lituse;
5164
5165 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5166 lituse = "lituse_jsrdirect";
5167 #else
5168 lituse = "lituse_jsr";
5169 #endif
5170
5171 gcc_assert (INTVAL (x) != 0);
5172 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5173 }
5174 break;
5175 case 'r':
5176 /* If this operand is the constant zero, write it as "$31". */
5177 if (REG_P (x))
5178 fprintf (file, "%s", reg_names[REGNO (x)]);
5179 else if (x == CONST0_RTX (GET_MODE (x)))
5180 fprintf (file, "$31");
5181 else
5182 output_operand_lossage ("invalid %%r value");
5183 break;
5184
5185 case 'R':
5186 /* Similar, but for floating-point. */
5187 if (REG_P (x))
5188 fprintf (file, "%s", reg_names[REGNO (x)]);
5189 else if (x == CONST0_RTX (GET_MODE (x)))
5190 fprintf (file, "$f31");
5191 else
5192 output_operand_lossage ("invalid %%R value");
5193 break;
5194
5195 case 'N':
5196 /* Write the 1's complement of a constant. */
5197 if (!CONST_INT_P (x))
5198 output_operand_lossage ("invalid %%N value");
5199
5200 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5201 break;
5202
5203 case 'P':
5204 /* Write 1 << C, for a constant C. */
5205 if (!CONST_INT_P (x))
5206 output_operand_lossage ("invalid %%P value");
5207
5208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5209 break;
5210
5211 case 'h':
5212 /* Write the high-order 16 bits of a constant, sign-extended. */
5213 if (!CONST_INT_P (x))
5214 output_operand_lossage ("invalid %%h value");
5215
5216 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5217 break;
5218
5219 case 'L':
5220 /* Write the low-order 16 bits of a constant, sign-extended. */
5221 if (!CONST_INT_P (x))
5222 output_operand_lossage ("invalid %%L value");
5223
5224 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5225 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5226 break;
5227
5228 case 'm':
5229 /* Write mask for ZAP insn. */
5230 if (GET_CODE (x) == CONST_DOUBLE)
5231 {
5232 HOST_WIDE_INT mask = 0;
5233 HOST_WIDE_INT value;
5234
5235 value = CONST_DOUBLE_LOW (x);
5236 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5237 i++, value >>= 8)
5238 if (value & 0xff)
5239 mask |= (1 << i);
5240
5241 value = CONST_DOUBLE_HIGH (x);
5242 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5243 i++, value >>= 8)
5244 if (value & 0xff)
5245 mask |= (1 << (i + sizeof (int)));
5246
5247 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5248 }
5249
5250 else if (CONST_INT_P (x))
5251 {
5252 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5253
5254 for (i = 0; i < 8; i++, value >>= 8)
5255 if (value & 0xff)
5256 mask |= (1 << i);
5257
5258 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5259 }
5260 else
5261 output_operand_lossage ("invalid %%m value");
5262 break;
5263
5264 case 'M':
5265 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5266 if (!CONST_INT_P (x)
5267 || (INTVAL (x) != 8 && INTVAL (x) != 16
5268 && INTVAL (x) != 32 && INTVAL (x) != 64))
5269 output_operand_lossage ("invalid %%M value");
5270
5271 fprintf (file, "%s",
5272 (INTVAL (x) == 8 ? "b"
5273 : INTVAL (x) == 16 ? "w"
5274 : INTVAL (x) == 32 ? "l"
5275 : "q"));
5276 break;
5277
5278 case 'U':
5279 /* Similar, except do it from the mask. */
5280 if (CONST_INT_P (x))
5281 {
5282 HOST_WIDE_INT value = INTVAL (x);
5283
5284 if (value == 0xff)
5285 {
5286 fputc ('b', file);
5287 break;
5288 }
5289 if (value == 0xffff)
5290 {
5291 fputc ('w', file);
5292 break;
5293 }
5294 if (value == 0xffffffff)
5295 {
5296 fputc ('l', file);
5297 break;
5298 }
5299 if (value == -1)
5300 {
5301 fputc ('q', file);
5302 break;
5303 }
5304 }
5305 else if (HOST_BITS_PER_WIDE_INT == 32
5306 && GET_CODE (x) == CONST_DOUBLE
5307 && CONST_DOUBLE_LOW (x) == 0xffffffff
5308 && CONST_DOUBLE_HIGH (x) == 0)
5309 {
5310 fputc ('l', file);
5311 break;
5312 }
5313 output_operand_lossage ("invalid %%U value");
5314 break;
5315
5316 case 's':
5317 /* Write the constant value divided by 8 for little-endian mode or
5318 (56 - value) / 8 for big-endian mode. */
5319
5320 if (!CONST_INT_P (x)
5321 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5322 ? 56
5323 : 64)
5324 || (INTVAL (x) & 7) != 0)
5325 output_operand_lossage ("invalid %%s value");
5326
5327 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5328 WORDS_BIG_ENDIAN
5329 ? (56 - INTVAL (x)) / 8
5330 : INTVAL (x) / 8);
5331 break;
5332
5333 case 'S':
5334 /* Same, except compute (64 - c) / 8 */
5335
5336 if (!CONST_INT_P (x)
5337 && (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5338 && (INTVAL (x) & 7) != 8)
5339 output_operand_lossage ("invalid %%s value");
5340
5341 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5342 break;
5343
5344 case 't':
5345 {
5346 /* On Unicos/Mk systems: use a DEX expression if the symbol
5347 clashes with a register name. */
5348 int dex = unicosmk_need_dex (x);
5349 if (dex)
5350 fprintf (file, "DEX(%d)", dex);
5351 else
5352 output_addr_const (file, x);
5353 }
5354 break;
5355
5356 case 'C': case 'D': case 'c': case 'd':
5357 /* Write out comparison name. */
5358 {
5359 enum rtx_code c = GET_CODE (x);
5360
5361 if (!COMPARISON_P (x))
5362 output_operand_lossage ("invalid %%C value");
5363
5364 else if (code == 'D')
5365 c = reverse_condition (c);
5366 else if (code == 'c')
5367 c = swap_condition (c);
5368 else if (code == 'd')
5369 c = swap_condition (reverse_condition (c));
5370
5371 if (c == LEU)
5372 fprintf (file, "ule");
5373 else if (c == LTU)
5374 fprintf (file, "ult");
5375 else if (c == UNORDERED)
5376 fprintf (file, "un");
5377 else
5378 fprintf (file, "%s", GET_RTX_NAME (c));
5379 }
5380 break;
5381
5382 case 'E':
5383 /* Write the divide or modulus operator. */
5384 switch (GET_CODE (x))
5385 {
5386 case DIV:
5387 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5388 break;
5389 case UDIV:
5390 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5391 break;
5392 case MOD:
5393 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5394 break;
5395 case UMOD:
5396 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5397 break;
5398 default:
5399 output_operand_lossage ("invalid %%E value");
5400 break;
5401 }
5402 break;
5403
5404 case 'A':
5405 /* Write "_u" for unaligned access. */
5406 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5407 fprintf (file, "_u");
5408 break;
5409
5410 case 0:
5411 if (REG_P (x))
5412 fprintf (file, "%s", reg_names[REGNO (x)]);
5413 else if (MEM_P (x))
5414 output_address (XEXP (x, 0));
5415 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5416 {
5417 switch (XINT (XEXP (x, 0), 1))
5418 {
5419 case UNSPEC_DTPREL:
5420 case UNSPEC_TPREL:
5421 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5422 break;
5423 default:
5424 output_operand_lossage ("unknown relocation unspec");
5425 break;
5426 }
5427 }
5428 else
5429 output_addr_const (file, x);
5430 break;
5431
5432 default:
5433 output_operand_lossage ("invalid %%xn code");
5434 }
5435 }
5436
5437 void
5438 print_operand_address (FILE *file, rtx addr)
5439 {
5440 int basereg = 31;
5441 HOST_WIDE_INT offset = 0;
5442
5443 if (GET_CODE (addr) == AND)
5444 addr = XEXP (addr, 0);
5445
5446 if (GET_CODE (addr) == PLUS
5447 && CONST_INT_P (XEXP (addr, 1)))
5448 {
5449 offset = INTVAL (XEXP (addr, 1));
5450 addr = XEXP (addr, 0);
5451 }
5452
5453 if (GET_CODE (addr) == LO_SUM)
5454 {
5455 const char *reloc16, *reloclo;
5456 rtx op1 = XEXP (addr, 1);
5457
5458 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5459 {
5460 op1 = XEXP (op1, 0);
5461 switch (XINT (op1, 1))
5462 {
5463 case UNSPEC_DTPREL:
5464 reloc16 = NULL;
5465 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5466 break;
5467 case UNSPEC_TPREL:
5468 reloc16 = NULL;
5469 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5470 break;
5471 default:
5472 output_operand_lossage ("unknown relocation unspec");
5473 return;
5474 }
5475
5476 output_addr_const (file, XVECEXP (op1, 0, 0));
5477 }
5478 else
5479 {
5480 reloc16 = "gprel";
5481 reloclo = "gprellow";
5482 output_addr_const (file, op1);
5483 }
5484
5485 if (offset)
5486 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5487
5488 addr = XEXP (addr, 0);
5489 switch (GET_CODE (addr))
5490 {
5491 case REG:
5492 basereg = REGNO (addr);
5493 break;
5494
5495 case SUBREG:
5496 basereg = subreg_regno (addr);
5497 break;
5498
5499 default:
5500 gcc_unreachable ();
5501 }
5502
5503 fprintf (file, "($%d)\t\t!%s", basereg,
5504 (basereg == 29 ? reloc16 : reloclo));
5505 return;
5506 }
5507
5508 switch (GET_CODE (addr))
5509 {
5510 case REG:
5511 basereg = REGNO (addr);
5512 break;
5513
5514 case SUBREG:
5515 basereg = subreg_regno (addr);
5516 break;
5517
5518 case CONST_INT:
5519 offset = INTVAL (addr);
5520 break;
5521
5522 #if TARGET_ABI_OPEN_VMS
5523 case SYMBOL_REF:
5524 fprintf (file, "%s", XSTR (addr, 0));
5525 return;
5526
5527 case CONST:
5528 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5529 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5530 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5531 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5532 INTVAL (XEXP (XEXP (addr, 0), 1)));
5533 return;
5534
5535 #endif
5536 default:
5537 gcc_unreachable ();
5538 }
5539
5540 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5541 }
5542 \f
5543 /* Emit RTL insns to initialize the variable parts of a trampoline at
5544 TRAMP. FNADDR is an RTX for the address of the function's pure
5545 code. CXT is an RTX for the static chain value for the function.
5546
5547 The three offset parameters are for the individual template's
5548 layout. A JMPOFS < 0 indicates that the trampoline does not
5549 contain instructions at all.
5550
5551 We assume here that a function will be called many more times than
5552 its address is taken (e.g., it might be passed to qsort), so we
5553 take the trouble to initialize the "hint" field in the JMP insn.
5554 Note that the hint field is PC (new) + 4 * bits 13:0. */
5555
5556 void
5557 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5558 int fnofs, int cxtofs, int jmpofs)
5559 {
5560 rtx addr;
5561 /* VMS really uses DImode pointers in memory at this point. */
5562 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5563
5564 #ifdef POINTERS_EXTEND_UNSIGNED
5565 fnaddr = convert_memory_address (mode, fnaddr);
5566 cxt = convert_memory_address (mode, cxt);
5567 #endif
5568
5569 if (TARGET_ABI_OPEN_VMS)
5570 {
5571 rtx temp1, traddr;
5572 const char *fnname;
5573 char *trname;
5574
5575 /* Construct the name of the trampoline entry point. */
5576 fnname = XSTR (fnaddr, 0);
5577 trname = (char *) alloca (strlen (fnname) + 5);
5578 strcpy (trname, fnname);
5579 strcat (trname, "..tr");
5580 traddr = gen_rtx_SYMBOL_REF
5581 (mode, ggc_alloc_string (trname, strlen (trname) + 1));
5582
5583 /* Trampoline (or "bounded") procedure descriptor is constructed from
5584 the function's procedure descriptor with certain fields zeroed IAW
5585 the VMS calling standard. This is stored in the first quadword. */
5586 temp1 = force_reg (DImode, gen_rtx_MEM (DImode, fnaddr));
5587 temp1 = expand_and (DImode, temp1,
5588 GEN_INT (0xffff0fff0000fff0), NULL_RTX);
5589 addr = memory_address (mode, plus_constant (tramp, 0));
5590 emit_move_insn (gen_rtx_MEM (DImode, addr), temp1);
5591
5592 /* Trampoline transfer address is stored in the second quadword
5593 of the trampoline. */
5594 addr = memory_address (mode, plus_constant (tramp, 8));
5595 emit_move_insn (gen_rtx_MEM (mode, addr), traddr);
5596 }
5597
5598 /* Store function address and CXT. */
5599 addr = memory_address (mode, plus_constant (tramp, fnofs));
5600 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5601 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5602 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5603
5604 #ifdef ENABLE_EXECUTE_STACK
5605 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5606 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
5607 #endif
5608
5609 if (jmpofs >= 0)
5610 emit_insn (gen_imb ());
5611 }
5612 \f
5613 /* Determine where to put an argument to a function.
5614 Value is zero to push the argument on the stack,
5615 or a hard register in which to store the argument.
5616
5617 MODE is the argument's machine mode.
5618 TYPE is the data type of the argument (as a tree).
5619 This is null for libcalls where that information may
5620 not be available.
5621 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5622 the preceding args and about the function being called.
5623 NAMED is nonzero if this argument is a named parameter
5624 (otherwise it is an extra parameter matching an ellipsis).
5625
5626 On Alpha the first 6 words of args are normally in registers
5627 and the rest are pushed. */
5628
5629 rtx
5630 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5631 int named ATTRIBUTE_UNUSED)
5632 {
5633 int basereg;
5634 int num_args;
5635
5636 /* Don't get confused and pass small structures in FP registers. */
5637 if (type && AGGREGATE_TYPE_P (type))
5638 basereg = 16;
5639 else
5640 {
5641 #ifdef ENABLE_CHECKING
5642 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5643 values here. */
5644 gcc_assert (!COMPLEX_MODE_P (mode));
5645 #endif
5646
5647 /* Set up defaults for FP operands passed in FP registers, and
5648 integral operands passed in integer registers. */
5649 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5650 basereg = 32 + 16;
5651 else
5652 basereg = 16;
5653 }
5654
5655 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5656 the three platforms, so we can't avoid conditional compilation. */
5657 #if TARGET_ABI_OPEN_VMS
5658 {
5659 if (mode == VOIDmode)
5660 return alpha_arg_info_reg_val (cum);
5661
5662 num_args = cum.num_args;
5663 if (num_args >= 6
5664 || targetm.calls.must_pass_in_stack (mode, type))
5665 return NULL_RTX;
5666 }
5667 #elif TARGET_ABI_UNICOSMK
5668 {
5669 int size;
5670
5671 /* If this is the last argument, generate the call info word (CIW). */
5672 /* ??? We don't include the caller's line number in the CIW because
5673 I don't know how to determine it if debug infos are turned off. */
5674 if (mode == VOIDmode)
5675 {
5676 int i;
5677 HOST_WIDE_INT lo;
5678 HOST_WIDE_INT hi;
5679 rtx ciw;
5680
5681 lo = 0;
5682
5683 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5684 if (cum.reg_args_type[i])
5685 lo |= (1 << (7 - i));
5686
5687 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5688 lo |= 7;
5689 else
5690 lo |= cum.num_reg_words;
5691
5692 #if HOST_BITS_PER_WIDE_INT == 32
5693 hi = (cum.num_args << 20) | cum.num_arg_words;
5694 #else
5695 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5696 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5697 hi = 0;
5698 #endif
5699 ciw = immed_double_const (lo, hi, DImode);
5700
5701 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5702 UNSPEC_UMK_LOAD_CIW);
5703 }
5704
5705 size = ALPHA_ARG_SIZE (mode, type, named);
5706 num_args = cum.num_reg_words;
5707 if (cum.force_stack
5708 || cum.num_reg_words + size > 6
5709 || targetm.calls.must_pass_in_stack (mode, type))
5710 return NULL_RTX;
5711 else if (type && TYPE_MODE (type) == BLKmode)
5712 {
5713 rtx reg1, reg2;
5714
5715 reg1 = gen_rtx_REG (DImode, num_args + 16);
5716 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5717
5718 /* The argument fits in two registers. Note that we still need to
5719 reserve a register for empty structures. */
5720 if (size == 0)
5721 return NULL_RTX;
5722 else if (size == 1)
5723 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5724 else
5725 {
5726 reg2 = gen_rtx_REG (DImode, num_args + 17);
5727 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5728 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5729 }
5730 }
5731 }
5732 #elif TARGET_ABI_OSF
5733 {
5734 if (cum >= 6)
5735 return NULL_RTX;
5736 num_args = cum;
5737
5738 /* VOID is passed as a special flag for "last argument". */
5739 if (type == void_type_node)
5740 basereg = 16;
5741 else if (targetm.calls.must_pass_in_stack (mode, type))
5742 return NULL_RTX;
5743 }
5744 #else
5745 #error Unhandled ABI
5746 #endif
5747
5748 return gen_rtx_REG (mode, num_args + basereg);
5749 }
5750
5751 static int
5752 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5753 enum machine_mode mode ATTRIBUTE_UNUSED,
5754 tree type ATTRIBUTE_UNUSED,
5755 bool named ATTRIBUTE_UNUSED)
5756 {
5757 int words = 0;
5758
5759 #if TARGET_ABI_OPEN_VMS
5760 if (cum->num_args < 6
5761 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5762 words = 6 - cum->num_args;
5763 #elif TARGET_ABI_UNICOSMK
5764 /* Never any split arguments. */
5765 #elif TARGET_ABI_OSF
5766 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5767 words = 6 - *cum;
5768 #else
5769 #error Unhandled ABI
5770 #endif
5771
5772 return words * UNITS_PER_WORD;
5773 }
5774
5775
5776 /* Return true if TYPE must be returned in memory, instead of in registers. */
5777
5778 static bool
5779 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5780 {
5781 enum machine_mode mode = VOIDmode;
5782 int size;
5783
5784 if (type)
5785 {
5786 mode = TYPE_MODE (type);
5787
5788 /* All aggregates are returned in memory, except on OpenVMS where
5789 records that fit 64 bits should be returned by immediate value
5790 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5791 if (TARGET_ABI_OPEN_VMS
5792 && TREE_CODE (type) != ARRAY_TYPE
5793 && (unsigned HOST_WIDE_INT) int_size_in_bytes(type) <= 8)
5794 return false;
5795
5796 if (AGGREGATE_TYPE_P (type))
5797 return true;
5798 }
5799
5800 size = GET_MODE_SIZE (mode);
5801 switch (GET_MODE_CLASS (mode))
5802 {
5803 case MODE_VECTOR_FLOAT:
5804 /* Pass all float vectors in memory, like an aggregate. */
5805 return true;
5806
5807 case MODE_COMPLEX_FLOAT:
5808 /* We judge complex floats on the size of their element,
5809 not the size of the whole type. */
5810 size = GET_MODE_UNIT_SIZE (mode);
5811 break;
5812
5813 case MODE_INT:
5814 case MODE_FLOAT:
5815 case MODE_COMPLEX_INT:
5816 case MODE_VECTOR_INT:
5817 break;
5818
5819 default:
5820 /* ??? We get called on all sorts of random stuff from
5821 aggregate_value_p. We must return something, but it's not
5822 clear what's safe to return. Pretend it's a struct I
5823 guess. */
5824 return true;
5825 }
5826
5827 /* Otherwise types must fit in one register. */
5828 return size > UNITS_PER_WORD;
5829 }
5830
5831 /* Return true if TYPE should be passed by invisible reference. */
5832
5833 static bool
5834 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5835 enum machine_mode mode,
5836 const_tree type ATTRIBUTE_UNUSED,
5837 bool named ATTRIBUTE_UNUSED)
5838 {
5839 return mode == TFmode || mode == TCmode;
5840 }
5841
5842 /* Define how to find the value returned by a function. VALTYPE is the
5843 data type of the value (as a tree). If the precise function being
5844 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5845 MODE is set instead of VALTYPE for libcalls.
5846
5847 On Alpha the value is found in $0 for integer functions and
5848 $f0 for floating-point functions. */
5849
5850 rtx
5851 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5852 enum machine_mode mode)
5853 {
5854 unsigned int regnum, dummy;
5855 enum mode_class mclass;
5856
5857 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5858
5859 if (valtype)
5860 mode = TYPE_MODE (valtype);
5861
5862 mclass = GET_MODE_CLASS (mode);
5863 switch (mclass)
5864 {
5865 case MODE_INT:
5866 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5867 where we have them returning both SImode and DImode. */
5868 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5869 PROMOTE_MODE (mode, dummy, valtype);
5870 /* FALLTHRU */
5871
5872 case MODE_COMPLEX_INT:
5873 case MODE_VECTOR_INT:
5874 regnum = 0;
5875 break;
5876
5877 case MODE_FLOAT:
5878 regnum = 32;
5879 break;
5880
5881 case MODE_COMPLEX_FLOAT:
5882 {
5883 enum machine_mode cmode = GET_MODE_INNER (mode);
5884
5885 return gen_rtx_PARALLEL
5886 (VOIDmode,
5887 gen_rtvec (2,
5888 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5889 const0_rtx),
5890 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5891 GEN_INT (GET_MODE_SIZE (cmode)))));
5892 }
5893
5894 case MODE_RANDOM:
5895 /* We should only reach here for BLKmode on VMS. */
5896 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5897 regnum = 0;
5898 break;
5899
5900 default:
5901 gcc_unreachable ();
5902 }
5903
5904 return gen_rtx_REG (mode, regnum);
5905 }
5906
5907 /* TCmode complex values are passed by invisible reference. We
5908 should not split these values. */
5909
5910 static bool
5911 alpha_split_complex_arg (const_tree type)
5912 {
5913 return TYPE_MODE (type) != TCmode;
5914 }
5915
5916 static tree
5917 alpha_build_builtin_va_list (void)
5918 {
5919 tree base, ofs, space, record, type_decl;
5920
5921 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5922 return ptr_type_node;
5923
5924 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5925 type_decl = build_decl (BUILTINS_LOCATION,
5926 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5927 TREE_CHAIN (record) = type_decl;
5928 TYPE_NAME (record) = type_decl;
5929
5930 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5931
5932 /* Dummy field to prevent alignment warnings. */
5933 space = build_decl (BUILTINS_LOCATION,
5934 FIELD_DECL, NULL_TREE, integer_type_node);
5935 DECL_FIELD_CONTEXT (space) = record;
5936 DECL_ARTIFICIAL (space) = 1;
5937 DECL_IGNORED_P (space) = 1;
5938
5939 ofs = build_decl (BUILTINS_LOCATION,
5940 FIELD_DECL, get_identifier ("__offset"),
5941 integer_type_node);
5942 DECL_FIELD_CONTEXT (ofs) = record;
5943 TREE_CHAIN (ofs) = space;
5944
5945 base = build_decl (BUILTINS_LOCATION,
5946 FIELD_DECL, get_identifier ("__base"),
5947 ptr_type_node);
5948 DECL_FIELD_CONTEXT (base) = record;
5949 TREE_CHAIN (base) = ofs;
5950
5951 TYPE_FIELDS (record) = base;
5952 layout_type (record);
5953
5954 va_list_gpr_counter_field = ofs;
5955 return record;
5956 }
5957
5958 #if TARGET_ABI_OSF
5959 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5960 and constant additions. */
5961
5962 static gimple
5963 va_list_skip_additions (tree lhs)
5964 {
5965 gimple stmt;
5966
5967 for (;;)
5968 {
5969 enum tree_code code;
5970
5971 stmt = SSA_NAME_DEF_STMT (lhs);
5972
5973 if (gimple_code (stmt) == GIMPLE_PHI)
5974 return stmt;
5975
5976 if (!is_gimple_assign (stmt)
5977 || gimple_assign_lhs (stmt) != lhs)
5978 return NULL;
5979
5980 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5981 return stmt;
5982 code = gimple_assign_rhs_code (stmt);
5983 if (!CONVERT_EXPR_CODE_P (code)
5984 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5985 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5986 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5987 return stmt;
5988
5989 lhs = gimple_assign_rhs1 (stmt);
5990 }
5991 }
5992
5993 /* Check if LHS = RHS statement is
5994 LHS = *(ap.__base + ap.__offset + cst)
5995 or
5996 LHS = *(ap.__base
5997 + ((ap.__offset + cst <= 47)
5998 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5999 If the former, indicate that GPR registers are needed,
6000 if the latter, indicate that FPR registers are needed.
6001
6002 Also look for LHS = (*ptr).field, where ptr is one of the forms
6003 listed above.
6004
6005 On alpha, cfun->va_list_gpr_size is used as size of the needed
6006 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
6007 registers are needed and bit 1 set if FPR registers are needed.
6008 Return true if va_list references should not be scanned for the
6009 current statement. */
6010
6011 static bool
6012 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
6013 {
6014 tree base, offset, rhs;
6015 int offset_arg = 1;
6016 gimple base_stmt;
6017
6018 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
6019 != GIMPLE_SINGLE_RHS)
6020 return false;
6021
6022 rhs = gimple_assign_rhs1 (stmt);
6023 while (handled_component_p (rhs))
6024 rhs = TREE_OPERAND (rhs, 0);
6025 if (TREE_CODE (rhs) != INDIRECT_REF
6026 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
6027 return false;
6028
6029 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
6030 if (stmt == NULL
6031 || !is_gimple_assign (stmt)
6032 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
6033 return false;
6034
6035 base = gimple_assign_rhs1 (stmt);
6036 if (TREE_CODE (base) == SSA_NAME)
6037 {
6038 base_stmt = va_list_skip_additions (base);
6039 if (base_stmt
6040 && is_gimple_assign (base_stmt)
6041 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6042 base = gimple_assign_rhs1 (base_stmt);
6043 }
6044
6045 if (TREE_CODE (base) != COMPONENT_REF
6046 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6047 {
6048 base = gimple_assign_rhs2 (stmt);
6049 if (TREE_CODE (base) == SSA_NAME)
6050 {
6051 base_stmt = va_list_skip_additions (base);
6052 if (base_stmt
6053 && is_gimple_assign (base_stmt)
6054 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
6055 base = gimple_assign_rhs1 (base_stmt);
6056 }
6057
6058 if (TREE_CODE (base) != COMPONENT_REF
6059 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
6060 return false;
6061
6062 offset_arg = 0;
6063 }
6064
6065 base = get_base_address (base);
6066 if (TREE_CODE (base) != VAR_DECL
6067 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
6068 return false;
6069
6070 offset = gimple_op (stmt, 1 + offset_arg);
6071 if (TREE_CODE (offset) == SSA_NAME)
6072 {
6073 gimple offset_stmt = va_list_skip_additions (offset);
6074
6075 if (offset_stmt
6076 && gimple_code (offset_stmt) == GIMPLE_PHI)
6077 {
6078 HOST_WIDE_INT sub;
6079 gimple arg1_stmt, arg2_stmt;
6080 tree arg1, arg2;
6081 enum tree_code code1, code2;
6082
6083 if (gimple_phi_num_args (offset_stmt) != 2)
6084 goto escapes;
6085
6086 arg1_stmt
6087 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
6088 arg2_stmt
6089 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
6090 if (arg1_stmt == NULL
6091 || !is_gimple_assign (arg1_stmt)
6092 || arg2_stmt == NULL
6093 || !is_gimple_assign (arg2_stmt))
6094 goto escapes;
6095
6096 code1 = gimple_assign_rhs_code (arg1_stmt);
6097 code2 = gimple_assign_rhs_code (arg2_stmt);
6098 if (code1 == COMPONENT_REF
6099 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
6100 /* Do nothing. */;
6101 else if (code2 == COMPONENT_REF
6102 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
6103 {
6104 gimple tem = arg1_stmt;
6105 code2 = code1;
6106 arg1_stmt = arg2_stmt;
6107 arg2_stmt = tem;
6108 }
6109 else
6110 goto escapes;
6111
6112 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
6113 goto escapes;
6114
6115 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
6116 if (code2 == MINUS_EXPR)
6117 sub = -sub;
6118 if (sub < -48 || sub > -32)
6119 goto escapes;
6120
6121 arg1 = gimple_assign_rhs1 (arg1_stmt);
6122 arg2 = gimple_assign_rhs1 (arg2_stmt);
6123 if (TREE_CODE (arg2) == SSA_NAME)
6124 {
6125 arg2_stmt = va_list_skip_additions (arg2);
6126 if (arg2_stmt == NULL
6127 || !is_gimple_assign (arg2_stmt)
6128 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
6129 goto escapes;
6130 arg2 = gimple_assign_rhs1 (arg2_stmt);
6131 }
6132 if (arg1 != arg2)
6133 goto escapes;
6134
6135 if (TREE_CODE (arg1) != COMPONENT_REF
6136 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6137 || get_base_address (arg1) != base)
6138 goto escapes;
6139
6140 /* Need floating point regs. */
6141 cfun->va_list_fpr_size |= 2;
6142 return false;
6143 }
6144 if (offset_stmt
6145 && is_gimple_assign (offset_stmt)
6146 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6147 offset = gimple_assign_rhs1 (offset_stmt);
6148 }
6149 if (TREE_CODE (offset) != COMPONENT_REF
6150 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6151 || get_base_address (offset) != base)
6152 goto escapes;
6153 else
6154 /* Need general regs. */
6155 cfun->va_list_fpr_size |= 1;
6156 return false;
6157
6158 escapes:
6159 si->va_list_escapes = true;
6160 return false;
6161 }
6162 #endif
6163
6164 /* Perform any needed actions needed for a function that is receiving a
6165 variable number of arguments. */
6166
6167 static void
6168 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6169 tree type, int *pretend_size, int no_rtl)
6170 {
6171 CUMULATIVE_ARGS cum = *pcum;
6172
6173 /* Skip the current argument. */
6174 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6175
6176 #if TARGET_ABI_UNICOSMK
6177 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6178 arguments on the stack. Unfortunately, it doesn't always store the first
6179 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6180 with stdargs as we always have at least one named argument there. */
6181 if (cum.num_reg_words < 6)
6182 {
6183 if (!no_rtl)
6184 {
6185 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6186 emit_insn (gen_arg_home_umk ());
6187 }
6188 *pretend_size = 0;
6189 }
6190 #elif TARGET_ABI_OPEN_VMS
6191 /* For VMS, we allocate space for all 6 arg registers plus a count.
6192
6193 However, if NO registers need to be saved, don't allocate any space.
6194 This is not only because we won't need the space, but because AP
6195 includes the current_pretend_args_size and we don't want to mess up
6196 any ap-relative addresses already made. */
6197 if (cum.num_args < 6)
6198 {
6199 if (!no_rtl)
6200 {
6201 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6202 emit_insn (gen_arg_home ());
6203 }
6204 *pretend_size = 7 * UNITS_PER_WORD;
6205 }
6206 #else
6207 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6208 only push those that are remaining. However, if NO registers need to
6209 be saved, don't allocate any space. This is not only because we won't
6210 need the space, but because AP includes the current_pretend_args_size
6211 and we don't want to mess up any ap-relative addresses already made.
6212
6213 If we are not to use the floating-point registers, save the integer
6214 registers where we would put the floating-point registers. This is
6215 not the most efficient way to implement varargs with just one register
6216 class, but it isn't worth doing anything more efficient in this rare
6217 case. */
6218 if (cum >= 6)
6219 return;
6220
6221 if (!no_rtl)
6222 {
6223 int count;
6224 alias_set_type set = get_varargs_alias_set ();
6225 rtx tmp;
6226
6227 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6228 if (count > 6 - cum)
6229 count = 6 - cum;
6230
6231 /* Detect whether integer registers or floating-point registers
6232 are needed by the detected va_arg statements. See above for
6233 how these values are computed. Note that the "escape" value
6234 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6235 these bits set. */
6236 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6237
6238 if (cfun->va_list_fpr_size & 1)
6239 {
6240 tmp = gen_rtx_MEM (BLKmode,
6241 plus_constant (virtual_incoming_args_rtx,
6242 (cum + 6) * UNITS_PER_WORD));
6243 MEM_NOTRAP_P (tmp) = 1;
6244 set_mem_alias_set (tmp, set);
6245 move_block_from_reg (16 + cum, tmp, count);
6246 }
6247
6248 if (cfun->va_list_fpr_size & 2)
6249 {
6250 tmp = gen_rtx_MEM (BLKmode,
6251 plus_constant (virtual_incoming_args_rtx,
6252 cum * UNITS_PER_WORD));
6253 MEM_NOTRAP_P (tmp) = 1;
6254 set_mem_alias_set (tmp, set);
6255 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6256 }
6257 }
6258 *pretend_size = 12 * UNITS_PER_WORD;
6259 #endif
6260 }
6261
6262 static void
6263 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6264 {
6265 HOST_WIDE_INT offset;
6266 tree t, offset_field, base_field;
6267
6268 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6269 return;
6270
6271 if (TARGET_ABI_UNICOSMK)
6272 std_expand_builtin_va_start (valist, nextarg);
6273
6274 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6275 up by 48, storing fp arg registers in the first 48 bytes, and the
6276 integer arg registers in the next 48 bytes. This is only done,
6277 however, if any integer registers need to be stored.
6278
6279 If no integer registers need be stored, then we must subtract 48
6280 in order to account for the integer arg registers which are counted
6281 in argsize above, but which are not actually stored on the stack.
6282 Must further be careful here about structures straddling the last
6283 integer argument register; that futzes with pretend_args_size,
6284 which changes the meaning of AP. */
6285
6286 if (NUM_ARGS < 6)
6287 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6288 else
6289 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6290
6291 if (TARGET_ABI_OPEN_VMS)
6292 {
6293 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6294 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6295 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6296 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6297 TREE_SIDE_EFFECTS (t) = 1;
6298 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6299 }
6300 else
6301 {
6302 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6303 offset_field = TREE_CHAIN (base_field);
6304
6305 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6306 valist, base_field, NULL_TREE);
6307 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6308 valist, offset_field, NULL_TREE);
6309
6310 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6311 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6312 size_int (offset));
6313 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6314 TREE_SIDE_EFFECTS (t) = 1;
6315 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6316
6317 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6318 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6319 TREE_SIDE_EFFECTS (t) = 1;
6320 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6321 }
6322 }
6323
6324 static tree
6325 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6326 gimple_seq *pre_p)
6327 {
6328 tree type_size, ptr_type, addend, t, addr;
6329 gimple_seq internal_post;
6330
6331 /* If the type could not be passed in registers, skip the block
6332 reserved for the registers. */
6333 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6334 {
6335 t = build_int_cst (TREE_TYPE (offset), 6*8);
6336 gimplify_assign (offset,
6337 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6338 pre_p);
6339 }
6340
6341 addend = offset;
6342 ptr_type = build_pointer_type (type);
6343
6344 if (TREE_CODE (type) == COMPLEX_TYPE)
6345 {
6346 tree real_part, imag_part, real_temp;
6347
6348 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6349 offset, pre_p);
6350
6351 /* Copy the value into a new temporary, lest the formal temporary
6352 be reused out from under us. */
6353 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6354
6355 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6356 offset, pre_p);
6357
6358 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6359 }
6360 else if (TREE_CODE (type) == REAL_TYPE)
6361 {
6362 tree fpaddend, cond, fourtyeight;
6363
6364 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6365 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6366 addend, fourtyeight);
6367 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6368 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6369 fpaddend, addend);
6370 }
6371
6372 /* Build the final address and force that value into a temporary. */
6373 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6374 fold_convert (sizetype, addend));
6375 internal_post = NULL;
6376 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6377 gimple_seq_add_seq (pre_p, internal_post);
6378
6379 /* Update the offset field. */
6380 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6381 if (type_size == NULL || TREE_OVERFLOW (type_size))
6382 t = size_zero_node;
6383 else
6384 {
6385 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6386 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6387 t = size_binop (MULT_EXPR, t, size_int (8));
6388 }
6389 t = fold_convert (TREE_TYPE (offset), t);
6390 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6391 pre_p);
6392
6393 return build_va_arg_indirect_ref (addr);
6394 }
6395
6396 static tree
6397 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6398 gimple_seq *post_p)
6399 {
6400 tree offset_field, base_field, offset, base, t, r;
6401 bool indirect;
6402
6403 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6404 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6405
6406 base_field = TYPE_FIELDS (va_list_type_node);
6407 offset_field = TREE_CHAIN (base_field);
6408 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6409 valist, base_field, NULL_TREE);
6410 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6411 valist, offset_field, NULL_TREE);
6412
6413 /* Pull the fields of the structure out into temporaries. Since we never
6414 modify the base field, we can use a formal temporary. Sign-extend the
6415 offset field so that it's the proper width for pointer arithmetic. */
6416 base = get_formal_tmp_var (base_field, pre_p);
6417
6418 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6419 offset = get_initialized_tmp_var (t, pre_p, NULL);
6420
6421 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6422 if (indirect)
6423 type = build_pointer_type (type);
6424
6425 /* Find the value. Note that this will be a stable indirection, or
6426 a composite of stable indirections in the case of complex. */
6427 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6428
6429 /* Stuff the offset temporary back into its field. */
6430 gimplify_assign (unshare_expr (offset_field),
6431 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6432
6433 if (indirect)
6434 r = build_va_arg_indirect_ref (r);
6435
6436 return r;
6437 }
6438 \f
6439 /* Builtins. */
6440
6441 enum alpha_builtin
6442 {
6443 ALPHA_BUILTIN_CMPBGE,
6444 ALPHA_BUILTIN_EXTBL,
6445 ALPHA_BUILTIN_EXTWL,
6446 ALPHA_BUILTIN_EXTLL,
6447 ALPHA_BUILTIN_EXTQL,
6448 ALPHA_BUILTIN_EXTWH,
6449 ALPHA_BUILTIN_EXTLH,
6450 ALPHA_BUILTIN_EXTQH,
6451 ALPHA_BUILTIN_INSBL,
6452 ALPHA_BUILTIN_INSWL,
6453 ALPHA_BUILTIN_INSLL,
6454 ALPHA_BUILTIN_INSQL,
6455 ALPHA_BUILTIN_INSWH,
6456 ALPHA_BUILTIN_INSLH,
6457 ALPHA_BUILTIN_INSQH,
6458 ALPHA_BUILTIN_MSKBL,
6459 ALPHA_BUILTIN_MSKWL,
6460 ALPHA_BUILTIN_MSKLL,
6461 ALPHA_BUILTIN_MSKQL,
6462 ALPHA_BUILTIN_MSKWH,
6463 ALPHA_BUILTIN_MSKLH,
6464 ALPHA_BUILTIN_MSKQH,
6465 ALPHA_BUILTIN_UMULH,
6466 ALPHA_BUILTIN_ZAP,
6467 ALPHA_BUILTIN_ZAPNOT,
6468 ALPHA_BUILTIN_AMASK,
6469 ALPHA_BUILTIN_IMPLVER,
6470 ALPHA_BUILTIN_RPCC,
6471 ALPHA_BUILTIN_THREAD_POINTER,
6472 ALPHA_BUILTIN_SET_THREAD_POINTER,
6473 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6474 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6475
6476 /* TARGET_MAX */
6477 ALPHA_BUILTIN_MINUB8,
6478 ALPHA_BUILTIN_MINSB8,
6479 ALPHA_BUILTIN_MINUW4,
6480 ALPHA_BUILTIN_MINSW4,
6481 ALPHA_BUILTIN_MAXUB8,
6482 ALPHA_BUILTIN_MAXSB8,
6483 ALPHA_BUILTIN_MAXUW4,
6484 ALPHA_BUILTIN_MAXSW4,
6485 ALPHA_BUILTIN_PERR,
6486 ALPHA_BUILTIN_PKLB,
6487 ALPHA_BUILTIN_PKWB,
6488 ALPHA_BUILTIN_UNPKBL,
6489 ALPHA_BUILTIN_UNPKBW,
6490
6491 /* TARGET_CIX */
6492 ALPHA_BUILTIN_CTTZ,
6493 ALPHA_BUILTIN_CTLZ,
6494 ALPHA_BUILTIN_CTPOP,
6495
6496 ALPHA_BUILTIN_max
6497 };
6498
6499 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6500 CODE_FOR_builtin_cmpbge,
6501 CODE_FOR_builtin_extbl,
6502 CODE_FOR_builtin_extwl,
6503 CODE_FOR_builtin_extll,
6504 CODE_FOR_builtin_extql,
6505 CODE_FOR_builtin_extwh,
6506 CODE_FOR_builtin_extlh,
6507 CODE_FOR_builtin_extqh,
6508 CODE_FOR_builtin_insbl,
6509 CODE_FOR_builtin_inswl,
6510 CODE_FOR_builtin_insll,
6511 CODE_FOR_builtin_insql,
6512 CODE_FOR_builtin_inswh,
6513 CODE_FOR_builtin_inslh,
6514 CODE_FOR_builtin_insqh,
6515 CODE_FOR_builtin_mskbl,
6516 CODE_FOR_builtin_mskwl,
6517 CODE_FOR_builtin_mskll,
6518 CODE_FOR_builtin_mskql,
6519 CODE_FOR_builtin_mskwh,
6520 CODE_FOR_builtin_msklh,
6521 CODE_FOR_builtin_mskqh,
6522 CODE_FOR_umuldi3_highpart,
6523 CODE_FOR_builtin_zap,
6524 CODE_FOR_builtin_zapnot,
6525 CODE_FOR_builtin_amask,
6526 CODE_FOR_builtin_implver,
6527 CODE_FOR_builtin_rpcc,
6528 CODE_FOR_load_tp,
6529 CODE_FOR_set_tp,
6530 CODE_FOR_builtin_establish_vms_condition_handler,
6531 CODE_FOR_builtin_revert_vms_condition_handler,
6532
6533 /* TARGET_MAX */
6534 CODE_FOR_builtin_minub8,
6535 CODE_FOR_builtin_minsb8,
6536 CODE_FOR_builtin_minuw4,
6537 CODE_FOR_builtin_minsw4,
6538 CODE_FOR_builtin_maxub8,
6539 CODE_FOR_builtin_maxsb8,
6540 CODE_FOR_builtin_maxuw4,
6541 CODE_FOR_builtin_maxsw4,
6542 CODE_FOR_builtin_perr,
6543 CODE_FOR_builtin_pklb,
6544 CODE_FOR_builtin_pkwb,
6545 CODE_FOR_builtin_unpkbl,
6546 CODE_FOR_builtin_unpkbw,
6547
6548 /* TARGET_CIX */
6549 CODE_FOR_ctzdi2,
6550 CODE_FOR_clzdi2,
6551 CODE_FOR_popcountdi2
6552 };
6553
6554 struct alpha_builtin_def
6555 {
6556 const char *name;
6557 enum alpha_builtin code;
6558 unsigned int target_mask;
6559 bool is_const;
6560 };
6561
6562 static struct alpha_builtin_def const zero_arg_builtins[] = {
6563 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6564 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6565 };
6566
6567 static struct alpha_builtin_def const one_arg_builtins[] = {
6568 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6569 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6570 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6571 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6572 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6573 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6574 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6575 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6576 };
6577
6578 static struct alpha_builtin_def const two_arg_builtins[] = {
6579 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6580 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6581 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6582 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6583 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6584 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6585 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6586 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6587 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6588 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6589 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6590 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6591 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6592 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6593 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6594 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6595 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6596 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6597 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6598 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6599 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6600 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6601 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6602 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6603 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6604 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6605 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6606 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6607 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6608 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6609 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6610 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6611 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6612 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6613 };
6614
6615 static GTY(()) tree alpha_v8qi_u;
6616 static GTY(()) tree alpha_v8qi_s;
6617 static GTY(()) tree alpha_v4hi_u;
6618 static GTY(()) tree alpha_v4hi_s;
6619
6620 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6621 functions pointed to by P, with function type FTYPE. */
6622
6623 static void
6624 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6625 tree ftype)
6626 {
6627 tree decl;
6628 size_t i;
6629
6630 for (i = 0; i < count; ++i, ++p)
6631 if ((target_flags & p->target_mask) == p->target_mask)
6632 {
6633 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6634 NULL, NULL);
6635 if (p->is_const)
6636 TREE_READONLY (decl) = 1;
6637 TREE_NOTHROW (decl) = 1;
6638 }
6639 }
6640
6641
6642 static void
6643 alpha_init_builtins (void)
6644 {
6645 tree dimode_integer_type_node;
6646 tree ftype, decl;
6647
6648 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6649
6650 /* Fwrite on VMS is non-standard. */
6651 #if TARGET_ABI_OPEN_VMS
6652 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6653 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6654 #endif
6655
6656 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6657 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6658 ftype);
6659
6660 ftype = build_function_type_list (dimode_integer_type_node,
6661 dimode_integer_type_node, NULL_TREE);
6662 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6663 ftype);
6664
6665 ftype = build_function_type_list (dimode_integer_type_node,
6666 dimode_integer_type_node,
6667 dimode_integer_type_node, NULL_TREE);
6668 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6669 ftype);
6670
6671 ftype = build_function_type (ptr_type_node, void_list_node);
6672 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6673 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6674 NULL, NULL);
6675 TREE_NOTHROW (decl) = 1;
6676
6677 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6678 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6679 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6680 NULL, NULL);
6681 TREE_NOTHROW (decl) = 1;
6682
6683 if (TARGET_ABI_OPEN_VMS)
6684 {
6685 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6686 NULL_TREE);
6687 add_builtin_function ("__builtin_establish_vms_condition_handler", ftype,
6688 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6689 BUILT_IN_MD, NULL, NULL_TREE);
6690
6691 ftype = build_function_type_list (ptr_type_node, void_type_node,
6692 NULL_TREE);
6693 add_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6694 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6695 BUILT_IN_MD, NULL, NULL_TREE);
6696 }
6697
6698 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6699 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6700 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6701 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6702 }
6703
6704 /* Expand an expression EXP that calls a built-in function,
6705 with result going to TARGET if that's convenient
6706 (and in mode MODE if that's convenient).
6707 SUBTARGET may be used as the target for computing one of EXP's operands.
6708 IGNORE is nonzero if the value is to be ignored. */
6709
6710 static rtx
6711 alpha_expand_builtin (tree exp, rtx target,
6712 rtx subtarget ATTRIBUTE_UNUSED,
6713 enum machine_mode mode ATTRIBUTE_UNUSED,
6714 int ignore ATTRIBUTE_UNUSED)
6715 {
6716 #define MAX_ARGS 2
6717
6718 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6719 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6720 tree arg;
6721 call_expr_arg_iterator iter;
6722 enum insn_code icode;
6723 rtx op[MAX_ARGS], pat;
6724 int arity;
6725 bool nonvoid;
6726
6727 if (fcode >= ALPHA_BUILTIN_max)
6728 internal_error ("bad builtin fcode");
6729 icode = code_for_builtin[fcode];
6730 if (icode == 0)
6731 internal_error ("bad builtin fcode");
6732
6733 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6734
6735 arity = 0;
6736 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6737 {
6738 const struct insn_operand_data *insn_op;
6739
6740 if (arg == error_mark_node)
6741 return NULL_RTX;
6742 if (arity > MAX_ARGS)
6743 return NULL_RTX;
6744
6745 insn_op = &insn_data[icode].operand[arity + nonvoid];
6746
6747 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6748
6749 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6750 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6751 arity++;
6752 }
6753
6754 if (nonvoid)
6755 {
6756 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6757 if (!target
6758 || GET_MODE (target) != tmode
6759 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6760 target = gen_reg_rtx (tmode);
6761 }
6762
6763 switch (arity)
6764 {
6765 case 0:
6766 pat = GEN_FCN (icode) (target);
6767 break;
6768 case 1:
6769 if (nonvoid)
6770 pat = GEN_FCN (icode) (target, op[0]);
6771 else
6772 pat = GEN_FCN (icode) (op[0]);
6773 break;
6774 case 2:
6775 pat = GEN_FCN (icode) (target, op[0], op[1]);
6776 break;
6777 default:
6778 gcc_unreachable ();
6779 }
6780 if (!pat)
6781 return NULL_RTX;
6782 emit_insn (pat);
6783
6784 if (nonvoid)
6785 return target;
6786 else
6787 return const0_rtx;
6788 }
6789
6790
6791 /* Several bits below assume HWI >= 64 bits. This should be enforced
6792 by config.gcc. */
6793 #if HOST_BITS_PER_WIDE_INT < 64
6794 # error "HOST_WIDE_INT too small"
6795 #endif
6796
6797 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6798 with an 8-bit output vector. OPINT contains the integer operands; bit N
6799 of OP_CONST is set if OPINT[N] is valid. */
6800
6801 static tree
6802 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6803 {
6804 if (op_const == 3)
6805 {
6806 int i, val;
6807 for (i = 0, val = 0; i < 8; ++i)
6808 {
6809 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6810 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6811 if (c0 >= c1)
6812 val |= 1 << i;
6813 }
6814 return build_int_cst (long_integer_type_node, val);
6815 }
6816 else if (op_const == 2 && opint[1] == 0)
6817 return build_int_cst (long_integer_type_node, 0xff);
6818 return NULL;
6819 }
6820
6821 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6822 specialized form of an AND operation. Other byte manipulation instructions
6823 are defined in terms of this instruction, so this is also used as a
6824 subroutine for other builtins.
6825
6826 OP contains the tree operands; OPINT contains the extracted integer values.
6827 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6828 OPINT may be considered. */
6829
6830 static tree
6831 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6832 long op_const)
6833 {
6834 if (op_const & 2)
6835 {
6836 unsigned HOST_WIDE_INT mask = 0;
6837 int i;
6838
6839 for (i = 0; i < 8; ++i)
6840 if ((opint[1] >> i) & 1)
6841 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6842
6843 if (op_const & 1)
6844 return build_int_cst (long_integer_type_node, opint[0] & mask);
6845
6846 if (op)
6847 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6848 build_int_cst (long_integer_type_node, mask));
6849 }
6850 else if ((op_const & 1) && opint[0] == 0)
6851 return build_int_cst (long_integer_type_node, 0);
6852 return NULL;
6853 }
6854
6855 /* Fold the builtins for the EXT family of instructions. */
6856
6857 static tree
6858 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6859 long op_const, unsigned HOST_WIDE_INT bytemask,
6860 bool is_high)
6861 {
6862 long zap_const = 2;
6863 tree *zap_op = NULL;
6864
6865 if (op_const & 2)
6866 {
6867 unsigned HOST_WIDE_INT loc;
6868
6869 loc = opint[1] & 7;
6870 if (BYTES_BIG_ENDIAN)
6871 loc ^= 7;
6872 loc *= 8;
6873
6874 if (loc != 0)
6875 {
6876 if (op_const & 1)
6877 {
6878 unsigned HOST_WIDE_INT temp = opint[0];
6879 if (is_high)
6880 temp <<= loc;
6881 else
6882 temp >>= loc;
6883 opint[0] = temp;
6884 zap_const = 3;
6885 }
6886 }
6887 else
6888 zap_op = op;
6889 }
6890
6891 opint[1] = bytemask;
6892 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6893 }
6894
6895 /* Fold the builtins for the INS family of instructions. */
6896
6897 static tree
6898 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6899 long op_const, unsigned HOST_WIDE_INT bytemask,
6900 bool is_high)
6901 {
6902 if ((op_const & 1) && opint[0] == 0)
6903 return build_int_cst (long_integer_type_node, 0);
6904
6905 if (op_const & 2)
6906 {
6907 unsigned HOST_WIDE_INT temp, loc, byteloc;
6908 tree *zap_op = NULL;
6909
6910 loc = opint[1] & 7;
6911 if (BYTES_BIG_ENDIAN)
6912 loc ^= 7;
6913 bytemask <<= loc;
6914
6915 temp = opint[0];
6916 if (is_high)
6917 {
6918 byteloc = (64 - (loc * 8)) & 0x3f;
6919 if (byteloc == 0)
6920 zap_op = op;
6921 else
6922 temp >>= byteloc;
6923 bytemask >>= 8;
6924 }
6925 else
6926 {
6927 byteloc = loc * 8;
6928 if (byteloc == 0)
6929 zap_op = op;
6930 else
6931 temp <<= byteloc;
6932 }
6933
6934 opint[0] = temp;
6935 opint[1] = bytemask;
6936 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6937 }
6938
6939 return NULL;
6940 }
6941
6942 static tree
6943 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6944 long op_const, unsigned HOST_WIDE_INT bytemask,
6945 bool is_high)
6946 {
6947 if (op_const & 2)
6948 {
6949 unsigned HOST_WIDE_INT loc;
6950
6951 loc = opint[1] & 7;
6952 if (BYTES_BIG_ENDIAN)
6953 loc ^= 7;
6954 bytemask <<= loc;
6955
6956 if (is_high)
6957 bytemask >>= 8;
6958
6959 opint[1] = bytemask ^ 0xff;
6960 }
6961
6962 return alpha_fold_builtin_zapnot (op, opint, op_const);
6963 }
6964
6965 static tree
6966 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6967 {
6968 switch (op_const)
6969 {
6970 case 3:
6971 {
6972 unsigned HOST_WIDE_INT l;
6973 HOST_WIDE_INT h;
6974
6975 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6976
6977 #if HOST_BITS_PER_WIDE_INT > 64
6978 # error fixme
6979 #endif
6980
6981 return build_int_cst (long_integer_type_node, h);
6982 }
6983
6984 case 1:
6985 opint[1] = opint[0];
6986 /* FALLTHRU */
6987 case 2:
6988 /* Note that (X*1) >> 64 == 0. */
6989 if (opint[1] == 0 || opint[1] == 1)
6990 return build_int_cst (long_integer_type_node, 0);
6991 break;
6992 }
6993 return NULL;
6994 }
6995
6996 static tree
6997 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6998 {
6999 tree op0 = fold_convert (vtype, op[0]);
7000 tree op1 = fold_convert (vtype, op[1]);
7001 tree val = fold_build2 (code, vtype, op0, op1);
7002 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
7003 }
7004
7005 static tree
7006 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
7007 {
7008 unsigned HOST_WIDE_INT temp = 0;
7009 int i;
7010
7011 if (op_const != 3)
7012 return NULL;
7013
7014 for (i = 0; i < 8; ++i)
7015 {
7016 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
7017 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
7018 if (a >= b)
7019 temp += a - b;
7020 else
7021 temp += b - a;
7022 }
7023
7024 return build_int_cst (long_integer_type_node, temp);
7025 }
7026
7027 static tree
7028 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
7029 {
7030 unsigned HOST_WIDE_INT temp;
7031
7032 if (op_const == 0)
7033 return NULL;
7034
7035 temp = opint[0] & 0xff;
7036 temp |= (opint[0] >> 24) & 0xff00;
7037
7038 return build_int_cst (long_integer_type_node, temp);
7039 }
7040
7041 static tree
7042 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
7043 {
7044 unsigned HOST_WIDE_INT temp;
7045
7046 if (op_const == 0)
7047 return NULL;
7048
7049 temp = opint[0] & 0xff;
7050 temp |= (opint[0] >> 8) & 0xff00;
7051 temp |= (opint[0] >> 16) & 0xff0000;
7052 temp |= (opint[0] >> 24) & 0xff000000;
7053
7054 return build_int_cst (long_integer_type_node, temp);
7055 }
7056
7057 static tree
7058 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
7059 {
7060 unsigned HOST_WIDE_INT temp;
7061
7062 if (op_const == 0)
7063 return NULL;
7064
7065 temp = opint[0] & 0xff;
7066 temp |= (opint[0] & 0xff00) << 24;
7067
7068 return build_int_cst (long_integer_type_node, temp);
7069 }
7070
7071 static tree
7072 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
7073 {
7074 unsigned HOST_WIDE_INT temp;
7075
7076 if (op_const == 0)
7077 return NULL;
7078
7079 temp = opint[0] & 0xff;
7080 temp |= (opint[0] & 0x0000ff00) << 8;
7081 temp |= (opint[0] & 0x00ff0000) << 16;
7082 temp |= (opint[0] & 0xff000000) << 24;
7083
7084 return build_int_cst (long_integer_type_node, temp);
7085 }
7086
7087 static tree
7088 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
7089 {
7090 unsigned HOST_WIDE_INT temp;
7091
7092 if (op_const == 0)
7093 return NULL;
7094
7095 if (opint[0] == 0)
7096 temp = 64;
7097 else
7098 temp = exact_log2 (opint[0] & -opint[0]);
7099
7100 return build_int_cst (long_integer_type_node, temp);
7101 }
7102
7103 static tree
7104 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
7105 {
7106 unsigned HOST_WIDE_INT temp;
7107
7108 if (op_const == 0)
7109 return NULL;
7110
7111 if (opint[0] == 0)
7112 temp = 64;
7113 else
7114 temp = 64 - floor_log2 (opint[0]) - 1;
7115
7116 return build_int_cst (long_integer_type_node, temp);
7117 }
7118
7119 static tree
7120 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
7121 {
7122 unsigned HOST_WIDE_INT temp, op;
7123
7124 if (op_const == 0)
7125 return NULL;
7126
7127 op = opint[0];
7128 temp = 0;
7129 while (op)
7130 temp++, op &= op - 1;
7131
7132 return build_int_cst (long_integer_type_node, temp);
7133 }
7134
7135 /* Fold one of our builtin functions. */
7136
7137 static tree
7138 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
7139 {
7140 tree op[MAX_ARGS], t;
7141 unsigned HOST_WIDE_INT opint[MAX_ARGS];
7142 long op_const = 0, arity = 0;
7143
7144 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
7145 {
7146 tree arg = TREE_VALUE (t);
7147 if (arg == error_mark_node)
7148 return NULL;
7149 if (arity >= MAX_ARGS)
7150 return NULL;
7151
7152 op[arity] = arg;
7153 opint[arity] = 0;
7154 if (TREE_CODE (arg) == INTEGER_CST)
7155 {
7156 op_const |= 1L << arity;
7157 opint[arity] = int_cst_value (arg);
7158 }
7159 }
7160
7161 switch (DECL_FUNCTION_CODE (fndecl))
7162 {
7163 case ALPHA_BUILTIN_CMPBGE:
7164 return alpha_fold_builtin_cmpbge (opint, op_const);
7165
7166 case ALPHA_BUILTIN_EXTBL:
7167 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7168 case ALPHA_BUILTIN_EXTWL:
7169 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7170 case ALPHA_BUILTIN_EXTLL:
7171 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7172 case ALPHA_BUILTIN_EXTQL:
7173 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7174 case ALPHA_BUILTIN_EXTWH:
7175 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7176 case ALPHA_BUILTIN_EXTLH:
7177 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7178 case ALPHA_BUILTIN_EXTQH:
7179 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7180
7181 case ALPHA_BUILTIN_INSBL:
7182 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7183 case ALPHA_BUILTIN_INSWL:
7184 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7185 case ALPHA_BUILTIN_INSLL:
7186 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7187 case ALPHA_BUILTIN_INSQL:
7188 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7189 case ALPHA_BUILTIN_INSWH:
7190 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7191 case ALPHA_BUILTIN_INSLH:
7192 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7193 case ALPHA_BUILTIN_INSQH:
7194 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7195
7196 case ALPHA_BUILTIN_MSKBL:
7197 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7198 case ALPHA_BUILTIN_MSKWL:
7199 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7200 case ALPHA_BUILTIN_MSKLL:
7201 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7202 case ALPHA_BUILTIN_MSKQL:
7203 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7204 case ALPHA_BUILTIN_MSKWH:
7205 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7206 case ALPHA_BUILTIN_MSKLH:
7207 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7208 case ALPHA_BUILTIN_MSKQH:
7209 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7210
7211 case ALPHA_BUILTIN_UMULH:
7212 return alpha_fold_builtin_umulh (opint, op_const);
7213
7214 case ALPHA_BUILTIN_ZAP:
7215 opint[1] ^= 0xff;
7216 /* FALLTHRU */
7217 case ALPHA_BUILTIN_ZAPNOT:
7218 return alpha_fold_builtin_zapnot (op, opint, op_const);
7219
7220 case ALPHA_BUILTIN_MINUB8:
7221 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7222 case ALPHA_BUILTIN_MINSB8:
7223 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7224 case ALPHA_BUILTIN_MINUW4:
7225 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7226 case ALPHA_BUILTIN_MINSW4:
7227 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7228 case ALPHA_BUILTIN_MAXUB8:
7229 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7230 case ALPHA_BUILTIN_MAXSB8:
7231 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7232 case ALPHA_BUILTIN_MAXUW4:
7233 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7234 case ALPHA_BUILTIN_MAXSW4:
7235 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7236
7237 case ALPHA_BUILTIN_PERR:
7238 return alpha_fold_builtin_perr (opint, op_const);
7239 case ALPHA_BUILTIN_PKLB:
7240 return alpha_fold_builtin_pklb (opint, op_const);
7241 case ALPHA_BUILTIN_PKWB:
7242 return alpha_fold_builtin_pkwb (opint, op_const);
7243 case ALPHA_BUILTIN_UNPKBL:
7244 return alpha_fold_builtin_unpkbl (opint, op_const);
7245 case ALPHA_BUILTIN_UNPKBW:
7246 return alpha_fold_builtin_unpkbw (opint, op_const);
7247
7248 case ALPHA_BUILTIN_CTTZ:
7249 return alpha_fold_builtin_cttz (opint, op_const);
7250 case ALPHA_BUILTIN_CTLZ:
7251 return alpha_fold_builtin_ctlz (opint, op_const);
7252 case ALPHA_BUILTIN_CTPOP:
7253 return alpha_fold_builtin_ctpop (opint, op_const);
7254
7255 case ALPHA_BUILTIN_AMASK:
7256 case ALPHA_BUILTIN_IMPLVER:
7257 case ALPHA_BUILTIN_RPCC:
7258 case ALPHA_BUILTIN_THREAD_POINTER:
7259 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7260 /* None of these are foldable at compile-time. */
7261 default:
7262 return NULL;
7263 }
7264 }
7265 \f
7266 /* This page contains routines that are used to determine what the function
7267 prologue and epilogue code will do and write them out. */
7268
7269 /* Compute the size of the save area in the stack. */
7270
7271 /* These variables are used for communication between the following functions.
7272 They indicate various things about the current function being compiled
7273 that are used to tell what kind of prologue, epilogue and procedure
7274 descriptor to generate. */
7275
7276 /* Nonzero if we need a stack procedure. */
7277 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7278 static enum alpha_procedure_types alpha_procedure_type;
7279
7280 /* Register number (either FP or SP) that is used to unwind the frame. */
7281 static int vms_unwind_regno;
7282
7283 /* Register number used to save FP. We need not have one for RA since
7284 we don't modify it for register procedures. This is only defined
7285 for register frame procedures. */
7286 static int vms_save_fp_regno;
7287
7288 /* Register number used to reference objects off our PV. */
7289 static int vms_base_regno;
7290
7291 /* Compute register masks for saved registers. */
7292
7293 static void
7294 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7295 {
7296 unsigned long imask = 0;
7297 unsigned long fmask = 0;
7298 unsigned int i;
7299
7300 /* When outputting a thunk, we don't have valid register life info,
7301 but assemble_start_function wants to output .frame and .mask
7302 directives. */
7303 if (cfun->is_thunk)
7304 {
7305 *imaskP = 0;
7306 *fmaskP = 0;
7307 return;
7308 }
7309
7310 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7311 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7312
7313 /* One for every register we have to save. */
7314 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7315 if (! fixed_regs[i] && ! call_used_regs[i]
7316 && df_regs_ever_live_p (i) && i != REG_RA
7317 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7318 {
7319 if (i < 32)
7320 imask |= (1UL << i);
7321 else
7322 fmask |= (1UL << (i - 32));
7323 }
7324
7325 /* We need to restore these for the handler. */
7326 if (crtl->calls_eh_return)
7327 {
7328 for (i = 0; ; ++i)
7329 {
7330 unsigned regno = EH_RETURN_DATA_REGNO (i);
7331 if (regno == INVALID_REGNUM)
7332 break;
7333 imask |= 1UL << regno;
7334 }
7335 }
7336
7337 /* If any register spilled, then spill the return address also. */
7338 /* ??? This is required by the Digital stack unwind specification
7339 and isn't needed if we're doing Dwarf2 unwinding. */
7340 if (imask || fmask || alpha_ra_ever_killed ())
7341 imask |= (1UL << REG_RA);
7342
7343 *imaskP = imask;
7344 *fmaskP = fmask;
7345 }
7346
7347 int
7348 alpha_sa_size (void)
7349 {
7350 unsigned long mask[2];
7351 int sa_size = 0;
7352 int i, j;
7353
7354 alpha_sa_mask (&mask[0], &mask[1]);
7355
7356 if (TARGET_ABI_UNICOSMK)
7357 {
7358 if (mask[0] || mask[1])
7359 sa_size = 14;
7360 }
7361 else
7362 {
7363 for (j = 0; j < 2; ++j)
7364 for (i = 0; i < 32; ++i)
7365 if ((mask[j] >> i) & 1)
7366 sa_size++;
7367 }
7368
7369 if (TARGET_ABI_UNICOSMK)
7370 {
7371 /* We might not need to generate a frame if we don't make any calls
7372 (including calls to __T3E_MISMATCH if this is a vararg function),
7373 don't have any local variables which require stack slots, don't
7374 use alloca and have not determined that we need a frame for other
7375 reasons. */
7376
7377 alpha_procedure_type
7378 = (sa_size || get_frame_size() != 0
7379 || crtl->outgoing_args_size
7380 || cfun->stdarg || cfun->calls_alloca
7381 || frame_pointer_needed)
7382 ? PT_STACK : PT_REGISTER;
7383
7384 /* Always reserve space for saving callee-saved registers if we
7385 need a frame as required by the calling convention. */
7386 if (alpha_procedure_type == PT_STACK)
7387 sa_size = 14;
7388 }
7389 else if (TARGET_ABI_OPEN_VMS)
7390 {
7391 /* Start with a stack procedure if we make any calls (REG_RA used), or
7392 need a frame pointer, with a register procedure if we otherwise need
7393 at least a slot, and with a null procedure in other cases. */
7394 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7395 alpha_procedure_type = PT_STACK;
7396 else if (get_frame_size() != 0)
7397 alpha_procedure_type = PT_REGISTER;
7398 else
7399 alpha_procedure_type = PT_NULL;
7400
7401 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7402 made the final decision on stack procedure vs register procedure. */
7403 if (alpha_procedure_type == PT_STACK)
7404 sa_size -= 2;
7405
7406 /* Decide whether to refer to objects off our PV via FP or PV.
7407 If we need FP for something else or if we receive a nonlocal
7408 goto (which expects PV to contain the value), we must use PV.
7409 Otherwise, start by assuming we can use FP. */
7410
7411 vms_base_regno
7412 = (frame_pointer_needed
7413 || cfun->has_nonlocal_label
7414 || alpha_procedure_type == PT_STACK
7415 || crtl->outgoing_args_size)
7416 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7417
7418 /* If we want to copy PV into FP, we need to find some register
7419 in which to save FP. */
7420
7421 vms_save_fp_regno = -1;
7422 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7423 for (i = 0; i < 32; i++)
7424 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7425 vms_save_fp_regno = i;
7426
7427 /* A VMS condition handler requires a stack procedure in our
7428 implementation. (not required by the calling standard). */
7429 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7430 || cfun->machine->uses_condition_handler)
7431 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7432 else if (alpha_procedure_type == PT_NULL)
7433 vms_base_regno = REG_PV;
7434
7435 /* Stack unwinding should be done via FP unless we use it for PV. */
7436 vms_unwind_regno = (vms_base_regno == REG_PV
7437 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7438
7439 /* If this is a stack procedure, allow space for saving FP, RA and
7440 a condition handler slot if needed. */
7441 if (alpha_procedure_type == PT_STACK)
7442 sa_size += 2 + cfun->machine->uses_condition_handler;
7443 }
7444 else
7445 {
7446 /* Our size must be even (multiple of 16 bytes). */
7447 if (sa_size & 1)
7448 sa_size++;
7449 }
7450
7451 return sa_size * 8;
7452 }
7453
7454 /* Define the offset between two registers, one to be eliminated,
7455 and the other its replacement, at the start of a routine. */
7456
7457 HOST_WIDE_INT
7458 alpha_initial_elimination_offset (unsigned int from,
7459 unsigned int to ATTRIBUTE_UNUSED)
7460 {
7461 HOST_WIDE_INT ret;
7462
7463 ret = alpha_sa_size ();
7464 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7465
7466 switch (from)
7467 {
7468 case FRAME_POINTER_REGNUM:
7469 break;
7470
7471 case ARG_POINTER_REGNUM:
7472 ret += (ALPHA_ROUND (get_frame_size ()
7473 + crtl->args.pretend_args_size)
7474 - crtl->args.pretend_args_size);
7475 break;
7476
7477 default:
7478 gcc_unreachable ();
7479 }
7480
7481 return ret;
7482 }
7483
7484 #if TARGET_ABI_OPEN_VMS
7485
7486 int
7487 alpha_vms_can_eliminate (unsigned int from ATTRIBUTE_UNUSED, unsigned int to)
7488 {
7489 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7490 alpha_sa_size ();
7491
7492 switch (alpha_procedure_type)
7493 {
7494 case PT_NULL:
7495 /* NULL procedures have no frame of their own and we only
7496 know how to resolve from the current stack pointer. */
7497 return to == STACK_POINTER_REGNUM;
7498
7499 case PT_REGISTER:
7500 case PT_STACK:
7501 /* We always eliminate except to the stack pointer if there is no
7502 usable frame pointer at hand. */
7503 return (to != STACK_POINTER_REGNUM
7504 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7505 }
7506
7507 gcc_unreachable ();
7508 }
7509
7510 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7511 designates the same location as FROM. */
7512
7513 HOST_WIDE_INT
7514 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7515 {
7516 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7517 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7518 on the proper computations and will need the register save area size
7519 in most cases. */
7520
7521 HOST_WIDE_INT sa_size = alpha_sa_size ();
7522
7523 /* PT_NULL procedures have no frame of their own and we only allow
7524 elimination to the stack pointer. This is the argument pointer and we
7525 resolve the soft frame pointer to that as well. */
7526
7527 if (alpha_procedure_type == PT_NULL)
7528 return 0;
7529
7530 /* For a PT_STACK procedure the frame layout looks as follows
7531
7532 -----> decreasing addresses
7533
7534 < size rounded up to 16 | likewise >
7535 --------------#------------------------------+++--------------+++-------#
7536 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7537 --------------#---------------------------------------------------------#
7538 ^ ^ ^ ^
7539 ARG_PTR FRAME_PTR HARD_FRAME_PTR STACK_PTR
7540
7541
7542 PT_REGISTER procedures are similar in that they may have a frame of their
7543 own. They have no regs-sa/pv/outgoing-args area.
7544
7545 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7546 to STACK_PTR if need be. */
7547
7548 {
7549 HOST_WIDE_INT offset;
7550 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7551
7552 switch (from)
7553 {
7554 case FRAME_POINTER_REGNUM:
7555 offset = ALPHA_ROUND (sa_size + pv_save_size);
7556 break;
7557 case ARG_POINTER_REGNUM:
7558 offset = (ALPHA_ROUND (sa_size + pv_save_size
7559 + get_frame_size ()
7560 + crtl->args.pretend_args_size)
7561 - crtl->args.pretend_args_size);
7562 break;
7563 default:
7564 gcc_unreachable ();
7565 }
7566
7567 if (to == STACK_POINTER_REGNUM)
7568 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7569
7570 return offset;
7571 }
7572 }
7573
7574 #define COMMON_OBJECT "common_object"
7575
7576 static tree
7577 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7578 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7579 bool *no_add_attrs ATTRIBUTE_UNUSED)
7580 {
7581 tree decl = *node;
7582 gcc_assert (DECL_P (decl));
7583
7584 DECL_COMMON (decl) = 1;
7585 return NULL_TREE;
7586 }
7587
7588 static const struct attribute_spec vms_attribute_table[] =
7589 {
7590 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7591 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler },
7592 { NULL, 0, 0, false, false, false, NULL }
7593 };
7594
7595 void
7596 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7597 unsigned HOST_WIDE_INT size,
7598 unsigned int align)
7599 {
7600 tree attr = DECL_ATTRIBUTES (decl);
7601 fprintf (file, "%s", COMMON_ASM_OP);
7602 assemble_name (file, name);
7603 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7604 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7605 fprintf (file, ",%u", align / BITS_PER_UNIT);
7606 if (attr)
7607 {
7608 attr = lookup_attribute (COMMON_OBJECT, attr);
7609 if (attr)
7610 fprintf (file, ",%s",
7611 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7612 }
7613 fputc ('\n', file);
7614 }
7615
7616 #undef COMMON_OBJECT
7617
7618 #endif
7619
7620 static int
7621 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7622 {
7623 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7624 }
7625
7626 int
7627 alpha_find_lo_sum_using_gp (rtx insn)
7628 {
7629 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7630 }
7631
7632 static int
7633 alpha_does_function_need_gp (void)
7634 {
7635 rtx insn;
7636
7637 /* The GP being variable is an OSF abi thing. */
7638 if (! TARGET_ABI_OSF)
7639 return 0;
7640
7641 /* We need the gp to load the address of __mcount. */
7642 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7643 return 1;
7644
7645 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7646 if (cfun->is_thunk)
7647 return 1;
7648
7649 /* The nonlocal receiver pattern assumes that the gp is valid for
7650 the nested function. Reasonable because it's almost always set
7651 correctly already. For the cases where that's wrong, make sure
7652 the nested function loads its gp on entry. */
7653 if (crtl->has_nonlocal_goto)
7654 return 1;
7655
7656 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7657 Even if we are a static function, we still need to do this in case
7658 our address is taken and passed to something like qsort. */
7659
7660 push_topmost_sequence ();
7661 insn = get_insns ();
7662 pop_topmost_sequence ();
7663
7664 for (; insn; insn = NEXT_INSN (insn))
7665 if (INSN_P (insn)
7666 && ! JUMP_TABLE_DATA_P (insn)
7667 && GET_CODE (PATTERN (insn)) != USE
7668 && GET_CODE (PATTERN (insn)) != CLOBBER
7669 && get_attr_usegp (insn))
7670 return 1;
7671
7672 return 0;
7673 }
7674
7675 \f
7676 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7677 sequences. */
7678
7679 static rtx
7680 set_frame_related_p (void)
7681 {
7682 rtx seq = get_insns ();
7683 rtx insn;
7684
7685 end_sequence ();
7686
7687 if (!seq)
7688 return NULL_RTX;
7689
7690 if (INSN_P (seq))
7691 {
7692 insn = seq;
7693 while (insn != NULL_RTX)
7694 {
7695 RTX_FRAME_RELATED_P (insn) = 1;
7696 insn = NEXT_INSN (insn);
7697 }
7698 seq = emit_insn (seq);
7699 }
7700 else
7701 {
7702 seq = emit_insn (seq);
7703 RTX_FRAME_RELATED_P (seq) = 1;
7704 }
7705 return seq;
7706 }
7707
7708 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7709
7710 /* Generates a store with the proper unwind info attached. VALUE is
7711 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7712 contains SP+FRAME_BIAS, and that is the unwind info that should be
7713 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7714 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7715
7716 static void
7717 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7718 HOST_WIDE_INT base_ofs, rtx frame_reg)
7719 {
7720 rtx addr, mem, insn;
7721
7722 addr = plus_constant (base_reg, base_ofs);
7723 mem = gen_rtx_MEM (DImode, addr);
7724 set_mem_alias_set (mem, alpha_sr_alias_set);
7725
7726 insn = emit_move_insn (mem, value);
7727 RTX_FRAME_RELATED_P (insn) = 1;
7728
7729 if (frame_bias || value != frame_reg)
7730 {
7731 if (frame_bias)
7732 {
7733 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7734 mem = gen_rtx_MEM (DImode, addr);
7735 }
7736
7737 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7738 gen_rtx_SET (VOIDmode, mem, frame_reg));
7739 }
7740 }
7741
7742 static void
7743 emit_frame_store (unsigned int regno, rtx base_reg,
7744 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7745 {
7746 rtx reg = gen_rtx_REG (DImode, regno);
7747 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7748 }
7749
7750 /* Write function prologue. */
7751
7752 /* On vms we have two kinds of functions:
7753
7754 - stack frame (PROC_STACK)
7755 these are 'normal' functions with local vars and which are
7756 calling other functions
7757 - register frame (PROC_REGISTER)
7758 keeps all data in registers, needs no stack
7759
7760 We must pass this to the assembler so it can generate the
7761 proper pdsc (procedure descriptor)
7762 This is done with the '.pdesc' command.
7763
7764 On not-vms, we don't really differentiate between the two, as we can
7765 simply allocate stack without saving registers. */
7766
7767 void
7768 alpha_expand_prologue (void)
7769 {
7770 /* Registers to save. */
7771 unsigned long imask = 0;
7772 unsigned long fmask = 0;
7773 /* Stack space needed for pushing registers clobbered by us. */
7774 HOST_WIDE_INT sa_size;
7775 /* Complete stack size needed. */
7776 HOST_WIDE_INT frame_size;
7777 /* Offset from base reg to register save area. */
7778 HOST_WIDE_INT reg_offset;
7779 rtx sa_reg;
7780 int i;
7781
7782 sa_size = alpha_sa_size ();
7783
7784 frame_size = get_frame_size ();
7785 if (TARGET_ABI_OPEN_VMS)
7786 frame_size = ALPHA_ROUND (sa_size
7787 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7788 + frame_size
7789 + crtl->args.pretend_args_size);
7790 else if (TARGET_ABI_UNICOSMK)
7791 /* We have to allocate space for the DSIB if we generate a frame. */
7792 frame_size = ALPHA_ROUND (sa_size
7793 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7794 + ALPHA_ROUND (frame_size
7795 + crtl->outgoing_args_size);
7796 else
7797 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7798 + sa_size
7799 + ALPHA_ROUND (frame_size
7800 + crtl->args.pretend_args_size));
7801
7802 if (TARGET_ABI_OPEN_VMS)
7803 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7804 else
7805 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7806
7807 alpha_sa_mask (&imask, &fmask);
7808
7809 /* Emit an insn to reload GP, if needed. */
7810 if (TARGET_ABI_OSF)
7811 {
7812 alpha_function_needs_gp = alpha_does_function_need_gp ();
7813 if (alpha_function_needs_gp)
7814 emit_insn (gen_prologue_ldgp ());
7815 }
7816
7817 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7818 the call to mcount ourselves, rather than having the linker do it
7819 magically in response to -pg. Since _mcount has special linkage,
7820 don't represent the call as a call. */
7821 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7822 emit_insn (gen_prologue_mcount ());
7823
7824 if (TARGET_ABI_UNICOSMK)
7825 unicosmk_gen_dsib (&imask);
7826
7827 /* Adjust the stack by the frame size. If the frame size is > 4096
7828 bytes, we need to be sure we probe somewhere in the first and last
7829 4096 bytes (we can probably get away without the latter test) and
7830 every 8192 bytes in between. If the frame size is > 32768, we
7831 do this in a loop. Otherwise, we generate the explicit probe
7832 instructions.
7833
7834 Note that we are only allowed to adjust sp once in the prologue. */
7835
7836 if (frame_size <= 32768)
7837 {
7838 if (frame_size > 4096)
7839 {
7840 int probed;
7841
7842 for (probed = 4096; probed < frame_size; probed += 8192)
7843 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7844 ? -probed + 64
7845 : -probed)));
7846
7847 /* We only have to do this probe if we aren't saving registers. */
7848 if (sa_size == 0 && frame_size > probed - 4096)
7849 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7850 }
7851
7852 if (frame_size != 0)
7853 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7854 GEN_INT (TARGET_ABI_UNICOSMK
7855 ? -frame_size + 64
7856 : -frame_size))));
7857 }
7858 else
7859 {
7860 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7861 number of 8192 byte blocks to probe. We then probe each block
7862 in the loop and then set SP to the proper location. If the
7863 amount remaining is > 4096, we have to do one more probe if we
7864 are not saving any registers. */
7865
7866 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7867 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7868 rtx ptr = gen_rtx_REG (DImode, 22);
7869 rtx count = gen_rtx_REG (DImode, 23);
7870 rtx seq;
7871
7872 emit_move_insn (count, GEN_INT (blocks));
7873 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7874 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7875
7876 /* Because of the difficulty in emitting a new basic block this
7877 late in the compilation, generate the loop as a single insn. */
7878 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7879
7880 if (leftover > 4096 && sa_size == 0)
7881 {
7882 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7883 MEM_VOLATILE_P (last) = 1;
7884 emit_move_insn (last, const0_rtx);
7885 }
7886
7887 if (TARGET_ABI_WINDOWS_NT)
7888 {
7889 /* For NT stack unwind (done by 'reverse execution'), it's
7890 not OK to take the result of a loop, even though the value
7891 is already in ptr, so we reload it via a single operation
7892 and subtract it to sp.
7893
7894 Yes, that's correct -- we have to reload the whole constant
7895 into a temporary via ldah+lda then subtract from sp. */
7896
7897 HOST_WIDE_INT lo, hi;
7898 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7899 hi = frame_size - lo;
7900
7901 emit_move_insn (ptr, GEN_INT (hi));
7902 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7903 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7904 ptr));
7905 }
7906 else
7907 {
7908 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7909 GEN_INT (-leftover)));
7910 }
7911
7912 /* This alternative is special, because the DWARF code cannot
7913 possibly intuit through the loop above. So we invent this
7914 note it looks at instead. */
7915 RTX_FRAME_RELATED_P (seq) = 1;
7916 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7917 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7918 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7919 GEN_INT (TARGET_ABI_UNICOSMK
7920 ? -frame_size + 64
7921 : -frame_size))));
7922 }
7923
7924 if (!TARGET_ABI_UNICOSMK)
7925 {
7926 HOST_WIDE_INT sa_bias = 0;
7927
7928 /* Cope with very large offsets to the register save area. */
7929 sa_reg = stack_pointer_rtx;
7930 if (reg_offset + sa_size > 0x8000)
7931 {
7932 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7933 rtx sa_bias_rtx;
7934
7935 if (low + sa_size <= 0x8000)
7936 sa_bias = reg_offset - low, reg_offset = low;
7937 else
7938 sa_bias = reg_offset, reg_offset = 0;
7939
7940 sa_reg = gen_rtx_REG (DImode, 24);
7941 sa_bias_rtx = GEN_INT (sa_bias);
7942
7943 if (add_operand (sa_bias_rtx, DImode))
7944 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7945 else
7946 {
7947 emit_move_insn (sa_reg, sa_bias_rtx);
7948 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7949 }
7950 }
7951
7952 /* Save regs in stack order. Beginning with VMS PV. */
7953 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7954 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7955
7956 /* Save register RA next. */
7957 if (imask & (1UL << REG_RA))
7958 {
7959 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7960 imask &= ~(1UL << REG_RA);
7961 reg_offset += 8;
7962 }
7963
7964 /* Now save any other registers required to be saved. */
7965 for (i = 0; i < 31; i++)
7966 if (imask & (1UL << i))
7967 {
7968 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7969 reg_offset += 8;
7970 }
7971
7972 for (i = 0; i < 31; i++)
7973 if (fmask & (1UL << i))
7974 {
7975 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7976 reg_offset += 8;
7977 }
7978 }
7979 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7980 {
7981 /* The standard frame on the T3E includes space for saving registers.
7982 We just have to use it. We don't have to save the return address and
7983 the old frame pointer here - they are saved in the DSIB. */
7984
7985 reg_offset = -56;
7986 for (i = 9; i < 15; i++)
7987 if (imask & (1UL << i))
7988 {
7989 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7990 reg_offset -= 8;
7991 }
7992 for (i = 2; i < 10; i++)
7993 if (fmask & (1UL << i))
7994 {
7995 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7996 reg_offset -= 8;
7997 }
7998 }
7999
8000 if (TARGET_ABI_OPEN_VMS)
8001 {
8002 /* Register frame procedures save the fp. */
8003 if (alpha_procedure_type == PT_REGISTER)
8004 {
8005 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
8006 hard_frame_pointer_rtx);
8007 add_reg_note (insn, REG_CFA_REGISTER, NULL);
8008 RTX_FRAME_RELATED_P (insn) = 1;
8009 }
8010
8011 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
8012 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
8013 gen_rtx_REG (DImode, REG_PV)));
8014
8015 if (alpha_procedure_type != PT_NULL
8016 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8017 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8018
8019 /* If we have to allocate space for outgoing args, do it now. */
8020 if (crtl->outgoing_args_size != 0)
8021 {
8022 rtx seq
8023 = emit_move_insn (stack_pointer_rtx,
8024 plus_constant
8025 (hard_frame_pointer_rtx,
8026 - (ALPHA_ROUND
8027 (crtl->outgoing_args_size))));
8028
8029 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
8030 if ! frame_pointer_needed. Setting the bit will change the CFA
8031 computation rule to use sp again, which would be wrong if we had
8032 frame_pointer_needed, as this means sp might move unpredictably
8033 later on.
8034
8035 Also, note that
8036 frame_pointer_needed
8037 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8038 and
8039 crtl->outgoing_args_size != 0
8040 => alpha_procedure_type != PT_NULL,
8041
8042 so when we are not setting the bit here, we are guaranteed to
8043 have emitted an FRP frame pointer update just before. */
8044 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
8045 }
8046 }
8047 else if (!TARGET_ABI_UNICOSMK)
8048 {
8049 /* If we need a frame pointer, set it from the stack pointer. */
8050 if (frame_pointer_needed)
8051 {
8052 if (TARGET_CAN_FAULT_IN_PROLOGUE)
8053 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
8054 else
8055 /* This must always be the last instruction in the
8056 prologue, thus we emit a special move + clobber. */
8057 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
8058 stack_pointer_rtx, sa_reg)));
8059 }
8060 }
8061
8062 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
8063 the prologue, for exception handling reasons, we cannot do this for
8064 any insn that might fault. We could prevent this for mems with a
8065 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
8066 have to prevent all such scheduling with a blockage.
8067
8068 Linux, on the other hand, never bothered to implement OSF/1's
8069 exception handling, and so doesn't care about such things. Anyone
8070 planning to use dwarf2 frame-unwind info can also omit the blockage. */
8071
8072 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
8073 emit_insn (gen_blockage ());
8074 }
8075
8076 /* Count the number of .file directives, so that .loc is up to date. */
8077 int num_source_filenames = 0;
8078
8079 /* Output the textual info surrounding the prologue. */
8080
8081 void
8082 alpha_start_function (FILE *file, const char *fnname,
8083 tree decl ATTRIBUTE_UNUSED)
8084 {
8085 unsigned long imask = 0;
8086 unsigned long fmask = 0;
8087 /* Stack space needed for pushing registers clobbered by us. */
8088 HOST_WIDE_INT sa_size;
8089 /* Complete stack size needed. */
8090 unsigned HOST_WIDE_INT frame_size;
8091 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
8092 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
8093 ? 524288
8094 : 1UL << 31;
8095 /* Offset from base reg to register save area. */
8096 HOST_WIDE_INT reg_offset;
8097 char *entry_label = (char *) alloca (strlen (fnname) + 6);
8098 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
8099 int i;
8100
8101 /* Don't emit an extern directive for functions defined in the same file. */
8102 if (TARGET_ABI_UNICOSMK)
8103 {
8104 tree name_tree;
8105 name_tree = get_identifier (fnname);
8106 TREE_ASM_WRITTEN (name_tree) = 1;
8107 }
8108
8109 #if TARGET_ABI_OPEN_VMS
8110 if (vms_debug_main
8111 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
8112 {
8113 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
8114 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
8115 switch_to_section (text_section);
8116 vms_debug_main = NULL;
8117 }
8118 #endif
8119
8120 alpha_fnname = fnname;
8121 sa_size = alpha_sa_size ();
8122
8123 frame_size = get_frame_size ();
8124 if (TARGET_ABI_OPEN_VMS)
8125 frame_size = ALPHA_ROUND (sa_size
8126 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8127 + frame_size
8128 + crtl->args.pretend_args_size);
8129 else if (TARGET_ABI_UNICOSMK)
8130 frame_size = ALPHA_ROUND (sa_size
8131 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8132 + ALPHA_ROUND (frame_size
8133 + crtl->outgoing_args_size);
8134 else
8135 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8136 + sa_size
8137 + ALPHA_ROUND (frame_size
8138 + crtl->args.pretend_args_size));
8139
8140 if (TARGET_ABI_OPEN_VMS)
8141 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8142 else
8143 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8144
8145 alpha_sa_mask (&imask, &fmask);
8146
8147 /* Ecoff can handle multiple .file directives, so put out file and lineno.
8148 We have to do that before the .ent directive as we cannot switch
8149 files within procedures with native ecoff because line numbers are
8150 linked to procedure descriptors.
8151 Outputting the lineno helps debugging of one line functions as they
8152 would otherwise get no line number at all. Please note that we would
8153 like to put out last_linenum from final.c, but it is not accessible. */
8154
8155 if (write_symbols == SDB_DEBUG)
8156 {
8157 #ifdef ASM_OUTPUT_SOURCE_FILENAME
8158 ASM_OUTPUT_SOURCE_FILENAME (file,
8159 DECL_SOURCE_FILE (current_function_decl));
8160 #endif
8161 #ifdef SDB_OUTPUT_SOURCE_LINE
8162 if (debug_info_level != DINFO_LEVEL_TERSE)
8163 SDB_OUTPUT_SOURCE_LINE (file,
8164 DECL_SOURCE_LINE (current_function_decl));
8165 #endif
8166 }
8167
8168 /* Issue function start and label. */
8169 if (TARGET_ABI_OPEN_VMS
8170 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
8171 {
8172 fputs ("\t.ent ", file);
8173 assemble_name (file, fnname);
8174 putc ('\n', file);
8175
8176 /* If the function needs GP, we'll write the "..ng" label there.
8177 Otherwise, do it here. */
8178 if (TARGET_ABI_OSF
8179 && ! alpha_function_needs_gp
8180 && ! cfun->is_thunk)
8181 {
8182 putc ('$', file);
8183 assemble_name (file, fnname);
8184 fputs ("..ng:\n", file);
8185 }
8186 }
8187 /* Nested functions on VMS that are potentially called via trampoline
8188 get a special transfer entry point that loads the called functions
8189 procedure descriptor and static chain. */
8190 if (TARGET_ABI_OPEN_VMS
8191 && !TREE_PUBLIC (decl)
8192 && DECL_CONTEXT (decl)
8193 && !TYPE_P (DECL_CONTEXT (decl)))
8194 {
8195 strcpy (tramp_label, fnname);
8196 strcat (tramp_label, "..tr");
8197 ASM_OUTPUT_LABEL (file, tramp_label);
8198 fprintf (file, "\tldq $1,24($27)\n");
8199 fprintf (file, "\tldq $27,16($27)\n");
8200 }
8201
8202 strcpy (entry_label, fnname);
8203 if (TARGET_ABI_OPEN_VMS)
8204 strcat (entry_label, "..en");
8205
8206 /* For public functions, the label must be globalized by appending an
8207 additional colon. */
8208 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
8209 strcat (entry_label, ":");
8210
8211 ASM_OUTPUT_LABEL (file, entry_label);
8212 inside_function = TRUE;
8213
8214 if (TARGET_ABI_OPEN_VMS)
8215 fprintf (file, "\t.base $%d\n", vms_base_regno);
8216
8217 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
8218 && !flag_inhibit_size_directive)
8219 {
8220 /* Set flags in procedure descriptor to request IEEE-conformant
8221 math-library routines. The value we set it to is PDSC_EXC_IEEE
8222 (/usr/include/pdsc.h). */
8223 fputs ("\t.eflag 48\n", file);
8224 }
8225
8226 /* Set up offsets to alpha virtual arg/local debugging pointer. */
8227 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
8228 alpha_arg_offset = -frame_size + 48;
8229
8230 /* Describe our frame. If the frame size is larger than an integer,
8231 print it as zero to avoid an assembler error. We won't be
8232 properly describing such a frame, but that's the best we can do. */
8233 if (TARGET_ABI_UNICOSMK)
8234 ;
8235 else if (TARGET_ABI_OPEN_VMS)
8236 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
8237 HOST_WIDE_INT_PRINT_DEC "\n",
8238 vms_unwind_regno,
8239 frame_size >= (1UL << 31) ? 0 : frame_size,
8240 reg_offset);
8241 else if (!flag_inhibit_size_directive)
8242 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
8243 (frame_pointer_needed
8244 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
8245 frame_size >= max_frame_size ? 0 : frame_size,
8246 crtl->args.pretend_args_size);
8247
8248 /* Describe which registers were spilled. */
8249 if (TARGET_ABI_UNICOSMK)
8250 ;
8251 else if (TARGET_ABI_OPEN_VMS)
8252 {
8253 if (imask)
8254 /* ??? Does VMS care if mask contains ra? The old code didn't
8255 set it, so I don't here. */
8256 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
8257 if (fmask)
8258 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
8259 if (alpha_procedure_type == PT_REGISTER)
8260 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
8261 }
8262 else if (!flag_inhibit_size_directive)
8263 {
8264 if (imask)
8265 {
8266 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
8267 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8268
8269 for (i = 0; i < 32; ++i)
8270 if (imask & (1UL << i))
8271 reg_offset += 8;
8272 }
8273
8274 if (fmask)
8275 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
8276 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
8277 }
8278
8279 #if TARGET_ABI_OPEN_VMS
8280 /* If a user condition handler has been installed at some point, emit
8281 the procedure descriptor bits to point the Condition Handling Facility
8282 at the indirection wrapper, and state the fp offset at which the user
8283 handler may be found. */
8284 if (cfun->machine->uses_condition_handler)
8285 {
8286 fprintf (file, "\t.handler __gcc_shell_handler\n");
8287 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
8288 }
8289
8290 /* Ifdef'ed cause link_section are only available then. */
8291 switch_to_section (readonly_data_section);
8292 fprintf (file, "\t.align 3\n");
8293 assemble_name (file, fnname); fputs ("..na:\n", file);
8294 fputs ("\t.ascii \"", file);
8295 assemble_name (file, fnname);
8296 fputs ("\\0\"\n", file);
8297 alpha_need_linkage (fnname, 1);
8298 switch_to_section (text_section);
8299 #endif
8300 }
8301
8302 /* Emit the .prologue note at the scheduled end of the prologue. */
8303
8304 static void
8305 alpha_output_function_end_prologue (FILE *file)
8306 {
8307 if (TARGET_ABI_UNICOSMK)
8308 ;
8309 else if (TARGET_ABI_OPEN_VMS)
8310 fputs ("\t.prologue\n", file);
8311 else if (TARGET_ABI_WINDOWS_NT)
8312 fputs ("\t.prologue 0\n", file);
8313 else if (!flag_inhibit_size_directive)
8314 fprintf (file, "\t.prologue %d\n",
8315 alpha_function_needs_gp || cfun->is_thunk);
8316 }
8317
8318 /* Write function epilogue. */
8319
8320 void
8321 alpha_expand_epilogue (void)
8322 {
8323 /* Registers to save. */
8324 unsigned long imask = 0;
8325 unsigned long fmask = 0;
8326 /* Stack space needed for pushing registers clobbered by us. */
8327 HOST_WIDE_INT sa_size;
8328 /* Complete stack size needed. */
8329 HOST_WIDE_INT frame_size;
8330 /* Offset from base reg to register save area. */
8331 HOST_WIDE_INT reg_offset;
8332 int fp_is_frame_pointer, fp_offset;
8333 rtx sa_reg, sa_reg_exp = NULL;
8334 rtx sp_adj1, sp_adj2, mem, reg, insn;
8335 rtx eh_ofs;
8336 rtx cfa_restores = NULL_RTX;
8337 int i;
8338
8339 sa_size = alpha_sa_size ();
8340
8341 frame_size = get_frame_size ();
8342 if (TARGET_ABI_OPEN_VMS)
8343 frame_size = ALPHA_ROUND (sa_size
8344 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8345 + frame_size
8346 + crtl->args.pretend_args_size);
8347 else if (TARGET_ABI_UNICOSMK)
8348 frame_size = ALPHA_ROUND (sa_size
8349 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8350 + ALPHA_ROUND (frame_size
8351 + crtl->outgoing_args_size);
8352 else
8353 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8354 + sa_size
8355 + ALPHA_ROUND (frame_size
8356 + crtl->args.pretend_args_size));
8357
8358 if (TARGET_ABI_OPEN_VMS)
8359 {
8360 if (alpha_procedure_type == PT_STACK)
8361 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8362 else
8363 reg_offset = 0;
8364 }
8365 else
8366 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8367
8368 alpha_sa_mask (&imask, &fmask);
8369
8370 fp_is_frame_pointer
8371 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8372 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8373 fp_offset = 0;
8374 sa_reg = stack_pointer_rtx;
8375
8376 if (crtl->calls_eh_return)
8377 eh_ofs = EH_RETURN_STACKADJ_RTX;
8378 else
8379 eh_ofs = NULL_RTX;
8380
8381 if (!TARGET_ABI_UNICOSMK && sa_size)
8382 {
8383 /* If we have a frame pointer, restore SP from it. */
8384 if ((TARGET_ABI_OPEN_VMS
8385 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8386 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8387 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8388
8389 /* Cope with very large offsets to the register save area. */
8390 if (reg_offset + sa_size > 0x8000)
8391 {
8392 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8393 HOST_WIDE_INT bias;
8394
8395 if (low + sa_size <= 0x8000)
8396 bias = reg_offset - low, reg_offset = low;
8397 else
8398 bias = reg_offset, reg_offset = 0;
8399
8400 sa_reg = gen_rtx_REG (DImode, 22);
8401 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8402
8403 emit_move_insn (sa_reg, sa_reg_exp);
8404 }
8405
8406 /* Restore registers in order, excepting a true frame pointer. */
8407
8408 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8409 if (! eh_ofs)
8410 set_mem_alias_set (mem, alpha_sr_alias_set);
8411 reg = gen_rtx_REG (DImode, REG_RA);
8412 emit_move_insn (reg, mem);
8413 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8414
8415 reg_offset += 8;
8416 imask &= ~(1UL << REG_RA);
8417
8418 for (i = 0; i < 31; ++i)
8419 if (imask & (1UL << i))
8420 {
8421 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8422 fp_offset = reg_offset;
8423 else
8424 {
8425 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8426 set_mem_alias_set (mem, alpha_sr_alias_set);
8427 reg = gen_rtx_REG (DImode, i);
8428 emit_move_insn (reg, mem);
8429 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8430 cfa_restores);
8431 }
8432 reg_offset += 8;
8433 }
8434
8435 for (i = 0; i < 31; ++i)
8436 if (fmask & (1UL << i))
8437 {
8438 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8439 set_mem_alias_set (mem, alpha_sr_alias_set);
8440 reg = gen_rtx_REG (DFmode, i+32);
8441 emit_move_insn (reg, mem);
8442 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8443 reg_offset += 8;
8444 }
8445 }
8446 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8447 {
8448 /* Restore callee-saved general-purpose registers. */
8449
8450 reg_offset = -56;
8451
8452 for (i = 9; i < 15; i++)
8453 if (imask & (1UL << i))
8454 {
8455 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8456 reg_offset));
8457 set_mem_alias_set (mem, alpha_sr_alias_set);
8458 reg = gen_rtx_REG (DImode, i);
8459 emit_move_insn (reg, mem);
8460 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8461 reg_offset -= 8;
8462 }
8463
8464 for (i = 2; i < 10; i++)
8465 if (fmask & (1UL << i))
8466 {
8467 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8468 reg_offset));
8469 set_mem_alias_set (mem, alpha_sr_alias_set);
8470 reg = gen_rtx_REG (DFmode, i+32);
8471 emit_move_insn (reg, mem);
8472 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8473 reg_offset -= 8;
8474 }
8475
8476 /* Restore the return address from the DSIB. */
8477 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8478 set_mem_alias_set (mem, alpha_sr_alias_set);
8479 reg = gen_rtx_REG (DImode, REG_RA);
8480 emit_move_insn (reg, mem);
8481 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8482 }
8483
8484 if (frame_size || eh_ofs)
8485 {
8486 sp_adj1 = stack_pointer_rtx;
8487
8488 if (eh_ofs)
8489 {
8490 sp_adj1 = gen_rtx_REG (DImode, 23);
8491 emit_move_insn (sp_adj1,
8492 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8493 }
8494
8495 /* If the stack size is large, begin computation into a temporary
8496 register so as not to interfere with a potential fp restore,
8497 which must be consecutive with an SP restore. */
8498 if (frame_size < 32768
8499 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8500 sp_adj2 = GEN_INT (frame_size);
8501 else if (TARGET_ABI_UNICOSMK)
8502 {
8503 sp_adj1 = gen_rtx_REG (DImode, 23);
8504 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8505 sp_adj2 = const0_rtx;
8506 }
8507 else if (frame_size < 0x40007fffL)
8508 {
8509 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8510
8511 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8512 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8513 sp_adj1 = sa_reg;
8514 else
8515 {
8516 sp_adj1 = gen_rtx_REG (DImode, 23);
8517 emit_move_insn (sp_adj1, sp_adj2);
8518 }
8519 sp_adj2 = GEN_INT (low);
8520 }
8521 else
8522 {
8523 rtx tmp = gen_rtx_REG (DImode, 23);
8524 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8525 if (!sp_adj2)
8526 {
8527 /* We can't drop new things to memory this late, afaik,
8528 so build it up by pieces. */
8529 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8530 -(frame_size < 0));
8531 gcc_assert (sp_adj2);
8532 }
8533 }
8534
8535 /* From now on, things must be in order. So emit blockages. */
8536
8537 /* Restore the frame pointer. */
8538 if (TARGET_ABI_UNICOSMK)
8539 {
8540 emit_insn (gen_blockage ());
8541 mem = gen_rtx_MEM (DImode,
8542 plus_constant (hard_frame_pointer_rtx, -16));
8543 set_mem_alias_set (mem, alpha_sr_alias_set);
8544 emit_move_insn (hard_frame_pointer_rtx, mem);
8545 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8546 hard_frame_pointer_rtx, cfa_restores);
8547 }
8548 else if (fp_is_frame_pointer)
8549 {
8550 emit_insn (gen_blockage ());
8551 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8552 set_mem_alias_set (mem, alpha_sr_alias_set);
8553 emit_move_insn (hard_frame_pointer_rtx, mem);
8554 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8555 hard_frame_pointer_rtx, cfa_restores);
8556 }
8557 else if (TARGET_ABI_OPEN_VMS)
8558 {
8559 emit_insn (gen_blockage ());
8560 emit_move_insn (hard_frame_pointer_rtx,
8561 gen_rtx_REG (DImode, vms_save_fp_regno));
8562 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8563 hard_frame_pointer_rtx, cfa_restores);
8564 }
8565
8566 /* Restore the stack pointer. */
8567 emit_insn (gen_blockage ());
8568 if (sp_adj2 == const0_rtx)
8569 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8570 else
8571 insn = emit_move_insn (stack_pointer_rtx,
8572 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8573 REG_NOTES (insn) = cfa_restores;
8574 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8575 RTX_FRAME_RELATED_P (insn) = 1;
8576 }
8577 else
8578 {
8579 gcc_assert (cfa_restores == NULL);
8580
8581 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8582 {
8583 emit_insn (gen_blockage ());
8584 insn = emit_move_insn (hard_frame_pointer_rtx,
8585 gen_rtx_REG (DImode, vms_save_fp_regno));
8586 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8587 RTX_FRAME_RELATED_P (insn) = 1;
8588 }
8589 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8590 {
8591 /* Decrement the frame pointer if the function does not have a
8592 frame. */
8593 emit_insn (gen_blockage ());
8594 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8595 hard_frame_pointer_rtx, constm1_rtx));
8596 }
8597 }
8598 }
8599 \f
8600 /* Output the rest of the textual info surrounding the epilogue. */
8601
8602 void
8603 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8604 {
8605 rtx insn;
8606
8607 /* We output a nop after noreturn calls at the very end of the function to
8608 ensure that the return address always remains in the caller's code range,
8609 as not doing so might confuse unwinding engines. */
8610 insn = get_last_insn ();
8611 if (!INSN_P (insn))
8612 insn = prev_active_insn (insn);
8613 if (insn && CALL_P (insn))
8614 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8615
8616 #if TARGET_ABI_OPEN_VMS
8617 alpha_write_linkage (file, fnname, decl);
8618 #endif
8619
8620 /* End the function. */
8621 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8622 {
8623 fputs ("\t.end ", file);
8624 assemble_name (file, fnname);
8625 putc ('\n', file);
8626 }
8627 inside_function = FALSE;
8628
8629 /* Output jump tables and the static subroutine information block. */
8630 if (TARGET_ABI_UNICOSMK)
8631 {
8632 unicosmk_output_ssib (file, fnname);
8633 unicosmk_output_deferred_case_vectors (file);
8634 }
8635 }
8636
8637 #if TARGET_ABI_OPEN_VMS
8638 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8639 {
8640 #ifdef DO_CRTL_NAMES
8641 DO_CRTL_NAMES;
8642 #endif
8643 }
8644 #endif
8645
8646 #if TARGET_ABI_OSF
8647 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8648
8649 In order to avoid the hordes of differences between generated code
8650 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8651 lots of code loading up large constants, generate rtl and emit it
8652 instead of going straight to text.
8653
8654 Not sure why this idea hasn't been explored before... */
8655
8656 static void
8657 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8658 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8659 tree function)
8660 {
8661 HOST_WIDE_INT hi, lo;
8662 rtx this_rtx, insn, funexp;
8663
8664 insn_locators_alloc ();
8665
8666 /* We always require a valid GP. */
8667 emit_insn (gen_prologue_ldgp ());
8668 emit_note (NOTE_INSN_PROLOGUE_END);
8669
8670 /* Find the "this" pointer. If the function returns a structure,
8671 the structure return pointer is in $16. */
8672 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8673 this_rtx = gen_rtx_REG (Pmode, 17);
8674 else
8675 this_rtx = gen_rtx_REG (Pmode, 16);
8676
8677 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8678 entire constant for the add. */
8679 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8680 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8681 if (hi + lo == delta)
8682 {
8683 if (hi)
8684 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8685 if (lo)
8686 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8687 }
8688 else
8689 {
8690 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8691 delta, -(delta < 0));
8692 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8693 }
8694
8695 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8696 if (vcall_offset)
8697 {
8698 rtx tmp, tmp2;
8699
8700 tmp = gen_rtx_REG (Pmode, 0);
8701 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8702
8703 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8704 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8705 if (hi + lo == vcall_offset)
8706 {
8707 if (hi)
8708 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8709 }
8710 else
8711 {
8712 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8713 vcall_offset, -(vcall_offset < 0));
8714 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8715 lo = 0;
8716 }
8717 if (lo)
8718 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8719 else
8720 tmp2 = tmp;
8721 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8722
8723 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8724 }
8725
8726 /* Generate a tail call to the target function. */
8727 if (! TREE_USED (function))
8728 {
8729 assemble_external (function);
8730 TREE_USED (function) = 1;
8731 }
8732 funexp = XEXP (DECL_RTL (function), 0);
8733 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8734 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8735 SIBLING_CALL_P (insn) = 1;
8736
8737 /* Run just enough of rest_of_compilation to get the insns emitted.
8738 There's not really enough bulk here to make other passes such as
8739 instruction scheduling worth while. Note that use_thunk calls
8740 assemble_start_function and assemble_end_function. */
8741 insn = get_insns ();
8742 shorten_branches (insn);
8743 final_start_function (insn, file, 1);
8744 final (insn, file, 1);
8745 final_end_function ();
8746 }
8747 #endif /* TARGET_ABI_OSF */
8748 \f
8749 /* Debugging support. */
8750
8751 #include "gstab.h"
8752
8753 /* Count the number of sdb related labels are generated (to find block
8754 start and end boundaries). */
8755
8756 int sdb_label_count = 0;
8757
8758 /* Name of the file containing the current function. */
8759
8760 static const char *current_function_file = "";
8761
8762 /* Offsets to alpha virtual arg/local debugging pointers. */
8763
8764 long alpha_arg_offset;
8765 long alpha_auto_offset;
8766 \f
8767 /* Emit a new filename to a stream. */
8768
8769 void
8770 alpha_output_filename (FILE *stream, const char *name)
8771 {
8772 static int first_time = TRUE;
8773
8774 if (first_time)
8775 {
8776 first_time = FALSE;
8777 ++num_source_filenames;
8778 current_function_file = name;
8779 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8780 output_quoted_string (stream, name);
8781 fprintf (stream, "\n");
8782 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8783 fprintf (stream, "\t#@stabs\n");
8784 }
8785
8786 else if (write_symbols == DBX_DEBUG)
8787 /* dbxout.c will emit an appropriate .stabs directive. */
8788 return;
8789
8790 else if (name != current_function_file
8791 && strcmp (name, current_function_file) != 0)
8792 {
8793 if (inside_function && ! TARGET_GAS)
8794 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8795 else
8796 {
8797 ++num_source_filenames;
8798 current_function_file = name;
8799 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8800 }
8801
8802 output_quoted_string (stream, name);
8803 fprintf (stream, "\n");
8804 }
8805 }
8806 \f
8807 /* Structure to show the current status of registers and memory. */
8808
8809 struct shadow_summary
8810 {
8811 struct {
8812 unsigned int i : 31; /* Mask of int regs */
8813 unsigned int fp : 31; /* Mask of fp regs */
8814 unsigned int mem : 1; /* mem == imem | fpmem */
8815 } used, defd;
8816 };
8817
8818 /* Summary the effects of expression X on the machine. Update SUM, a pointer
8819 to the summary structure. SET is nonzero if the insn is setting the
8820 object, otherwise zero. */
8821
8822 static void
8823 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8824 {
8825 const char *format_ptr;
8826 int i, j;
8827
8828 if (x == 0)
8829 return;
8830
8831 switch (GET_CODE (x))
8832 {
8833 /* ??? Note that this case would be incorrect if the Alpha had a
8834 ZERO_EXTRACT in SET_DEST. */
8835 case SET:
8836 summarize_insn (SET_SRC (x), sum, 0);
8837 summarize_insn (SET_DEST (x), sum, 1);
8838 break;
8839
8840 case CLOBBER:
8841 summarize_insn (XEXP (x, 0), sum, 1);
8842 break;
8843
8844 case USE:
8845 summarize_insn (XEXP (x, 0), sum, 0);
8846 break;
8847
8848 case ASM_OPERANDS:
8849 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8850 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8851 break;
8852
8853 case PARALLEL:
8854 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8855 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8856 break;
8857
8858 case SUBREG:
8859 summarize_insn (SUBREG_REG (x), sum, 0);
8860 break;
8861
8862 case REG:
8863 {
8864 int regno = REGNO (x);
8865 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8866
8867 if (regno == 31 || regno == 63)
8868 break;
8869
8870 if (set)
8871 {
8872 if (regno < 32)
8873 sum->defd.i |= mask;
8874 else
8875 sum->defd.fp |= mask;
8876 }
8877 else
8878 {
8879 if (regno < 32)
8880 sum->used.i |= mask;
8881 else
8882 sum->used.fp |= mask;
8883 }
8884 }
8885 break;
8886
8887 case MEM:
8888 if (set)
8889 sum->defd.mem = 1;
8890 else
8891 sum->used.mem = 1;
8892
8893 /* Find the regs used in memory address computation: */
8894 summarize_insn (XEXP (x, 0), sum, 0);
8895 break;
8896
8897 case CONST_INT: case CONST_DOUBLE:
8898 case SYMBOL_REF: case LABEL_REF: case CONST:
8899 case SCRATCH: case ASM_INPUT:
8900 break;
8901
8902 /* Handle common unary and binary ops for efficiency. */
8903 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8904 case MOD: case UDIV: case UMOD: case AND: case IOR:
8905 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8906 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8907 case NE: case EQ: case GE: case GT: case LE:
8908 case LT: case GEU: case GTU: case LEU: case LTU:
8909 summarize_insn (XEXP (x, 0), sum, 0);
8910 summarize_insn (XEXP (x, 1), sum, 0);
8911 break;
8912
8913 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8914 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8915 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8916 case SQRT: case FFS:
8917 summarize_insn (XEXP (x, 0), sum, 0);
8918 break;
8919
8920 default:
8921 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8922 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8923 switch (format_ptr[i])
8924 {
8925 case 'e':
8926 summarize_insn (XEXP (x, i), sum, 0);
8927 break;
8928
8929 case 'E':
8930 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8931 summarize_insn (XVECEXP (x, i, j), sum, 0);
8932 break;
8933
8934 case 'i':
8935 break;
8936
8937 default:
8938 gcc_unreachable ();
8939 }
8940 }
8941 }
8942
8943 /* Ensure a sufficient number of `trapb' insns are in the code when
8944 the user requests code with a trap precision of functions or
8945 instructions.
8946
8947 In naive mode, when the user requests a trap-precision of
8948 "instruction", a trapb is needed after every instruction that may
8949 generate a trap. This ensures that the code is resumption safe but
8950 it is also slow.
8951
8952 When optimizations are turned on, we delay issuing a trapb as long
8953 as possible. In this context, a trap shadow is the sequence of
8954 instructions that starts with a (potentially) trap generating
8955 instruction and extends to the next trapb or call_pal instruction
8956 (but GCC never generates call_pal by itself). We can delay (and
8957 therefore sometimes omit) a trapb subject to the following
8958 conditions:
8959
8960 (a) On entry to the trap shadow, if any Alpha register or memory
8961 location contains a value that is used as an operand value by some
8962 instruction in the trap shadow (live on entry), then no instruction
8963 in the trap shadow may modify the register or memory location.
8964
8965 (b) Within the trap shadow, the computation of the base register
8966 for a memory load or store instruction may not involve using the
8967 result of an instruction that might generate an UNPREDICTABLE
8968 result.
8969
8970 (c) Within the trap shadow, no register may be used more than once
8971 as a destination register. (This is to make life easier for the
8972 trap-handler.)
8973
8974 (d) The trap shadow may not include any branch instructions. */
8975
8976 static void
8977 alpha_handle_trap_shadows (void)
8978 {
8979 struct shadow_summary shadow;
8980 int trap_pending, exception_nesting;
8981 rtx i, n;
8982
8983 trap_pending = 0;
8984 exception_nesting = 0;
8985 shadow.used.i = 0;
8986 shadow.used.fp = 0;
8987 shadow.used.mem = 0;
8988 shadow.defd = shadow.used;
8989
8990 for (i = get_insns (); i ; i = NEXT_INSN (i))
8991 {
8992 if (NOTE_P (i))
8993 {
8994 switch (NOTE_KIND (i))
8995 {
8996 case NOTE_INSN_EH_REGION_BEG:
8997 exception_nesting++;
8998 if (trap_pending)
8999 goto close_shadow;
9000 break;
9001
9002 case NOTE_INSN_EH_REGION_END:
9003 exception_nesting--;
9004 if (trap_pending)
9005 goto close_shadow;
9006 break;
9007
9008 case NOTE_INSN_EPILOGUE_BEG:
9009 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
9010 goto close_shadow;
9011 break;
9012 }
9013 }
9014 else if (trap_pending)
9015 {
9016 if (alpha_tp == ALPHA_TP_FUNC)
9017 {
9018 if (JUMP_P (i)
9019 && GET_CODE (PATTERN (i)) == RETURN)
9020 goto close_shadow;
9021 }
9022 else if (alpha_tp == ALPHA_TP_INSN)
9023 {
9024 if (optimize > 0)
9025 {
9026 struct shadow_summary sum;
9027
9028 sum.used.i = 0;
9029 sum.used.fp = 0;
9030 sum.used.mem = 0;
9031 sum.defd = sum.used;
9032
9033 switch (GET_CODE (i))
9034 {
9035 case INSN:
9036 /* Annoyingly, get_attr_trap will die on these. */
9037 if (GET_CODE (PATTERN (i)) == USE
9038 || GET_CODE (PATTERN (i)) == CLOBBER)
9039 break;
9040
9041 summarize_insn (PATTERN (i), &sum, 0);
9042
9043 if ((sum.defd.i & shadow.defd.i)
9044 || (sum.defd.fp & shadow.defd.fp))
9045 {
9046 /* (c) would be violated */
9047 goto close_shadow;
9048 }
9049
9050 /* Combine shadow with summary of current insn: */
9051 shadow.used.i |= sum.used.i;
9052 shadow.used.fp |= sum.used.fp;
9053 shadow.used.mem |= sum.used.mem;
9054 shadow.defd.i |= sum.defd.i;
9055 shadow.defd.fp |= sum.defd.fp;
9056 shadow.defd.mem |= sum.defd.mem;
9057
9058 if ((sum.defd.i & shadow.used.i)
9059 || (sum.defd.fp & shadow.used.fp)
9060 || (sum.defd.mem & shadow.used.mem))
9061 {
9062 /* (a) would be violated (also takes care of (b)) */
9063 gcc_assert (get_attr_trap (i) != TRAP_YES
9064 || (!(sum.defd.i & sum.used.i)
9065 && !(sum.defd.fp & sum.used.fp)));
9066
9067 goto close_shadow;
9068 }
9069 break;
9070
9071 case JUMP_INSN:
9072 case CALL_INSN:
9073 case CODE_LABEL:
9074 goto close_shadow;
9075
9076 default:
9077 gcc_unreachable ();
9078 }
9079 }
9080 else
9081 {
9082 close_shadow:
9083 n = emit_insn_before (gen_trapb (), i);
9084 PUT_MODE (n, TImode);
9085 PUT_MODE (i, TImode);
9086 trap_pending = 0;
9087 shadow.used.i = 0;
9088 shadow.used.fp = 0;
9089 shadow.used.mem = 0;
9090 shadow.defd = shadow.used;
9091 }
9092 }
9093 }
9094
9095 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
9096 && NONJUMP_INSN_P (i)
9097 && GET_CODE (PATTERN (i)) != USE
9098 && GET_CODE (PATTERN (i)) != CLOBBER
9099 && get_attr_trap (i) == TRAP_YES)
9100 {
9101 if (optimize && !trap_pending)
9102 summarize_insn (PATTERN (i), &shadow, 0);
9103 trap_pending = 1;
9104 }
9105 }
9106 }
9107 \f
9108 /* Alpha can only issue instruction groups simultaneously if they are
9109 suitably aligned. This is very processor-specific. */
9110 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
9111 that are marked "fake". These instructions do not exist on that target,
9112 but it is possible to see these insns with deranged combinations of
9113 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
9114 choose a result at random. */
9115
9116 enum alphaev4_pipe {
9117 EV4_STOP = 0,
9118 EV4_IB0 = 1,
9119 EV4_IB1 = 2,
9120 EV4_IBX = 4
9121 };
9122
9123 enum alphaev5_pipe {
9124 EV5_STOP = 0,
9125 EV5_NONE = 1,
9126 EV5_E01 = 2,
9127 EV5_E0 = 4,
9128 EV5_E1 = 8,
9129 EV5_FAM = 16,
9130 EV5_FA = 32,
9131 EV5_FM = 64
9132 };
9133
9134 static enum alphaev4_pipe
9135 alphaev4_insn_pipe (rtx insn)
9136 {
9137 if (recog_memoized (insn) < 0)
9138 return EV4_STOP;
9139 if (get_attr_length (insn) != 4)
9140 return EV4_STOP;
9141
9142 switch (get_attr_type (insn))
9143 {
9144 case TYPE_ILD:
9145 case TYPE_LDSYM:
9146 case TYPE_FLD:
9147 case TYPE_LD_L:
9148 return EV4_IBX;
9149
9150 case TYPE_IADD:
9151 case TYPE_ILOG:
9152 case TYPE_ICMOV:
9153 case TYPE_ICMP:
9154 case TYPE_FST:
9155 case TYPE_SHIFT:
9156 case TYPE_IMUL:
9157 case TYPE_FBR:
9158 case TYPE_MVI: /* fake */
9159 return EV4_IB0;
9160
9161 case TYPE_IST:
9162 case TYPE_MISC:
9163 case TYPE_IBR:
9164 case TYPE_JSR:
9165 case TYPE_CALLPAL:
9166 case TYPE_FCPYS:
9167 case TYPE_FCMOV:
9168 case TYPE_FADD:
9169 case TYPE_FDIV:
9170 case TYPE_FMUL:
9171 case TYPE_ST_C:
9172 case TYPE_MB:
9173 case TYPE_FSQRT: /* fake */
9174 case TYPE_FTOI: /* fake */
9175 case TYPE_ITOF: /* fake */
9176 return EV4_IB1;
9177
9178 default:
9179 gcc_unreachable ();
9180 }
9181 }
9182
9183 static enum alphaev5_pipe
9184 alphaev5_insn_pipe (rtx insn)
9185 {
9186 if (recog_memoized (insn) < 0)
9187 return EV5_STOP;
9188 if (get_attr_length (insn) != 4)
9189 return EV5_STOP;
9190
9191 switch (get_attr_type (insn))
9192 {
9193 case TYPE_ILD:
9194 case TYPE_FLD:
9195 case TYPE_LDSYM:
9196 case TYPE_IADD:
9197 case TYPE_ILOG:
9198 case TYPE_ICMOV:
9199 case TYPE_ICMP:
9200 return EV5_E01;
9201
9202 case TYPE_IST:
9203 case TYPE_FST:
9204 case TYPE_SHIFT:
9205 case TYPE_IMUL:
9206 case TYPE_MISC:
9207 case TYPE_MVI:
9208 case TYPE_LD_L:
9209 case TYPE_ST_C:
9210 case TYPE_MB:
9211 case TYPE_FTOI: /* fake */
9212 case TYPE_ITOF: /* fake */
9213 return EV5_E0;
9214
9215 case TYPE_IBR:
9216 case TYPE_JSR:
9217 case TYPE_CALLPAL:
9218 return EV5_E1;
9219
9220 case TYPE_FCPYS:
9221 return EV5_FAM;
9222
9223 case TYPE_FBR:
9224 case TYPE_FCMOV:
9225 case TYPE_FADD:
9226 case TYPE_FDIV:
9227 case TYPE_FSQRT: /* fake */
9228 return EV5_FA;
9229
9230 case TYPE_FMUL:
9231 return EV5_FM;
9232
9233 default:
9234 gcc_unreachable ();
9235 }
9236 }
9237
9238 /* IN_USE is a mask of the slots currently filled within the insn group.
9239 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
9240 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
9241
9242 LEN is, of course, the length of the group in bytes. */
9243
9244 static rtx
9245 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
9246 {
9247 int len, in_use;
9248
9249 len = in_use = 0;
9250
9251 if (! INSN_P (insn)
9252 || GET_CODE (PATTERN (insn)) == CLOBBER
9253 || GET_CODE (PATTERN (insn)) == USE)
9254 goto next_and_done;
9255
9256 while (1)
9257 {
9258 enum alphaev4_pipe pipe;
9259
9260 pipe = alphaev4_insn_pipe (insn);
9261 switch (pipe)
9262 {
9263 case EV4_STOP:
9264 /* Force complex instructions to start new groups. */
9265 if (in_use)
9266 goto done;
9267
9268 /* If this is a completely unrecognized insn, it's an asm.
9269 We don't know how long it is, so record length as -1 to
9270 signal a needed realignment. */
9271 if (recog_memoized (insn) < 0)
9272 len = -1;
9273 else
9274 len = get_attr_length (insn);
9275 goto next_and_done;
9276
9277 case EV4_IBX:
9278 if (in_use & EV4_IB0)
9279 {
9280 if (in_use & EV4_IB1)
9281 goto done;
9282 in_use |= EV4_IB1;
9283 }
9284 else
9285 in_use |= EV4_IB0 | EV4_IBX;
9286 break;
9287
9288 case EV4_IB0:
9289 if (in_use & EV4_IB0)
9290 {
9291 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
9292 goto done;
9293 in_use |= EV4_IB1;
9294 }
9295 in_use |= EV4_IB0;
9296 break;
9297
9298 case EV4_IB1:
9299 if (in_use & EV4_IB1)
9300 goto done;
9301 in_use |= EV4_IB1;
9302 break;
9303
9304 default:
9305 gcc_unreachable ();
9306 }
9307 len += 4;
9308
9309 /* Haifa doesn't do well scheduling branches. */
9310 if (JUMP_P (insn))
9311 goto next_and_done;
9312
9313 next:
9314 insn = next_nonnote_insn (insn);
9315
9316 if (!insn || ! INSN_P (insn))
9317 goto done;
9318
9319 /* Let Haifa tell us where it thinks insn group boundaries are. */
9320 if (GET_MODE (insn) == TImode)
9321 goto done;
9322
9323 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9324 goto next;
9325 }
9326
9327 next_and_done:
9328 insn = next_nonnote_insn (insn);
9329
9330 done:
9331 *plen = len;
9332 *pin_use = in_use;
9333 return insn;
9334 }
9335
9336 /* IN_USE is a mask of the slots currently filled within the insn group.
9337 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9338 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9339
9340 LEN is, of course, the length of the group in bytes. */
9341
9342 static rtx
9343 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9344 {
9345 int len, in_use;
9346
9347 len = in_use = 0;
9348
9349 if (! INSN_P (insn)
9350 || GET_CODE (PATTERN (insn)) == CLOBBER
9351 || GET_CODE (PATTERN (insn)) == USE)
9352 goto next_and_done;
9353
9354 while (1)
9355 {
9356 enum alphaev5_pipe pipe;
9357
9358 pipe = alphaev5_insn_pipe (insn);
9359 switch (pipe)
9360 {
9361 case EV5_STOP:
9362 /* Force complex instructions to start new groups. */
9363 if (in_use)
9364 goto done;
9365
9366 /* If this is a completely unrecognized insn, it's an asm.
9367 We don't know how long it is, so record length as -1 to
9368 signal a needed realignment. */
9369 if (recog_memoized (insn) < 0)
9370 len = -1;
9371 else
9372 len = get_attr_length (insn);
9373 goto next_and_done;
9374
9375 /* ??? Most of the places below, we would like to assert never
9376 happen, as it would indicate an error either in Haifa, or
9377 in the scheduling description. Unfortunately, Haifa never
9378 schedules the last instruction of the BB, so we don't have
9379 an accurate TI bit to go off. */
9380 case EV5_E01:
9381 if (in_use & EV5_E0)
9382 {
9383 if (in_use & EV5_E1)
9384 goto done;
9385 in_use |= EV5_E1;
9386 }
9387 else
9388 in_use |= EV5_E0 | EV5_E01;
9389 break;
9390
9391 case EV5_E0:
9392 if (in_use & EV5_E0)
9393 {
9394 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9395 goto done;
9396 in_use |= EV5_E1;
9397 }
9398 in_use |= EV5_E0;
9399 break;
9400
9401 case EV5_E1:
9402 if (in_use & EV5_E1)
9403 goto done;
9404 in_use |= EV5_E1;
9405 break;
9406
9407 case EV5_FAM:
9408 if (in_use & EV5_FA)
9409 {
9410 if (in_use & EV5_FM)
9411 goto done;
9412 in_use |= EV5_FM;
9413 }
9414 else
9415 in_use |= EV5_FA | EV5_FAM;
9416 break;
9417
9418 case EV5_FA:
9419 if (in_use & EV5_FA)
9420 goto done;
9421 in_use |= EV5_FA;
9422 break;
9423
9424 case EV5_FM:
9425 if (in_use & EV5_FM)
9426 goto done;
9427 in_use |= EV5_FM;
9428 break;
9429
9430 case EV5_NONE:
9431 break;
9432
9433 default:
9434 gcc_unreachable ();
9435 }
9436 len += 4;
9437
9438 /* Haifa doesn't do well scheduling branches. */
9439 /* ??? If this is predicted not-taken, slotting continues, except
9440 that no more IBR, FBR, or JSR insns may be slotted. */
9441 if (JUMP_P (insn))
9442 goto next_and_done;
9443
9444 next:
9445 insn = next_nonnote_insn (insn);
9446
9447 if (!insn || ! INSN_P (insn))
9448 goto done;
9449
9450 /* Let Haifa tell us where it thinks insn group boundaries are. */
9451 if (GET_MODE (insn) == TImode)
9452 goto done;
9453
9454 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9455 goto next;
9456 }
9457
9458 next_and_done:
9459 insn = next_nonnote_insn (insn);
9460
9461 done:
9462 *plen = len;
9463 *pin_use = in_use;
9464 return insn;
9465 }
9466
9467 static rtx
9468 alphaev4_next_nop (int *pin_use)
9469 {
9470 int in_use = *pin_use;
9471 rtx nop;
9472
9473 if (!(in_use & EV4_IB0))
9474 {
9475 in_use |= EV4_IB0;
9476 nop = gen_nop ();
9477 }
9478 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9479 {
9480 in_use |= EV4_IB1;
9481 nop = gen_nop ();
9482 }
9483 else if (TARGET_FP && !(in_use & EV4_IB1))
9484 {
9485 in_use |= EV4_IB1;
9486 nop = gen_fnop ();
9487 }
9488 else
9489 nop = gen_unop ();
9490
9491 *pin_use = in_use;
9492 return nop;
9493 }
9494
9495 static rtx
9496 alphaev5_next_nop (int *pin_use)
9497 {
9498 int in_use = *pin_use;
9499 rtx nop;
9500
9501 if (!(in_use & EV5_E1))
9502 {
9503 in_use |= EV5_E1;
9504 nop = gen_nop ();
9505 }
9506 else if (TARGET_FP && !(in_use & EV5_FA))
9507 {
9508 in_use |= EV5_FA;
9509 nop = gen_fnop ();
9510 }
9511 else if (TARGET_FP && !(in_use & EV5_FM))
9512 {
9513 in_use |= EV5_FM;
9514 nop = gen_fnop ();
9515 }
9516 else
9517 nop = gen_unop ();
9518
9519 *pin_use = in_use;
9520 return nop;
9521 }
9522
9523 /* The instruction group alignment main loop. */
9524
9525 static void
9526 alpha_align_insns (unsigned int max_align,
9527 rtx (*next_group) (rtx, int *, int *),
9528 rtx (*next_nop) (int *))
9529 {
9530 /* ALIGN is the known alignment for the insn group. */
9531 unsigned int align;
9532 /* OFS is the offset of the current insn in the insn group. */
9533 int ofs;
9534 int prev_in_use, in_use, len, ldgp;
9535 rtx i, next;
9536
9537 /* Let shorten branches care for assigning alignments to code labels. */
9538 shorten_branches (get_insns ());
9539
9540 if (align_functions < 4)
9541 align = 4;
9542 else if ((unsigned int) align_functions < max_align)
9543 align = align_functions;
9544 else
9545 align = max_align;
9546
9547 ofs = prev_in_use = 0;
9548 i = get_insns ();
9549 if (NOTE_P (i))
9550 i = next_nonnote_insn (i);
9551
9552 ldgp = alpha_function_needs_gp ? 8 : 0;
9553
9554 while (i)
9555 {
9556 next = (*next_group) (i, &in_use, &len);
9557
9558 /* When we see a label, resync alignment etc. */
9559 if (LABEL_P (i))
9560 {
9561 unsigned int new_align = 1 << label_to_alignment (i);
9562
9563 if (new_align >= align)
9564 {
9565 align = new_align < max_align ? new_align : max_align;
9566 ofs = 0;
9567 }
9568
9569 else if (ofs & (new_align-1))
9570 ofs = (ofs | (new_align-1)) + 1;
9571 gcc_assert (!len);
9572 }
9573
9574 /* Handle complex instructions special. */
9575 else if (in_use == 0)
9576 {
9577 /* Asms will have length < 0. This is a signal that we have
9578 lost alignment knowledge. Assume, however, that the asm
9579 will not mis-align instructions. */
9580 if (len < 0)
9581 {
9582 ofs = 0;
9583 align = 4;
9584 len = 0;
9585 }
9586 }
9587
9588 /* If the known alignment is smaller than the recognized insn group,
9589 realign the output. */
9590 else if ((int) align < len)
9591 {
9592 unsigned int new_log_align = len > 8 ? 4 : 3;
9593 rtx prev, where;
9594
9595 where = prev = prev_nonnote_insn (i);
9596 if (!where || !LABEL_P (where))
9597 where = i;
9598
9599 /* Can't realign between a call and its gp reload. */
9600 if (! (TARGET_EXPLICIT_RELOCS
9601 && prev && CALL_P (prev)))
9602 {
9603 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9604 align = 1 << new_log_align;
9605 ofs = 0;
9606 }
9607 }
9608
9609 /* We may not insert padding inside the initial ldgp sequence. */
9610 else if (ldgp > 0)
9611 ldgp -= len;
9612
9613 /* If the group won't fit in the same INT16 as the previous,
9614 we need to add padding to keep the group together. Rather
9615 than simply leaving the insn filling to the assembler, we
9616 can make use of the knowledge of what sorts of instructions
9617 were issued in the previous group to make sure that all of
9618 the added nops are really free. */
9619 else if (ofs + len > (int) align)
9620 {
9621 int nop_count = (align - ofs) / 4;
9622 rtx where;
9623
9624 /* Insert nops before labels, branches, and calls to truly merge
9625 the execution of the nops with the previous instruction group. */
9626 where = prev_nonnote_insn (i);
9627 if (where)
9628 {
9629 if (LABEL_P (where))
9630 {
9631 rtx where2 = prev_nonnote_insn (where);
9632 if (where2 && JUMP_P (where2))
9633 where = where2;
9634 }
9635 else if (NONJUMP_INSN_P (where))
9636 where = i;
9637 }
9638 else
9639 where = i;
9640
9641 do
9642 emit_insn_before ((*next_nop)(&prev_in_use), where);
9643 while (--nop_count);
9644 ofs = 0;
9645 }
9646
9647 ofs = (ofs + len) & (align - 1);
9648 prev_in_use = in_use;
9649 i = next;
9650 }
9651 }
9652
9653 /* Insert an unop between a noreturn function call and GP load. */
9654
9655 static void
9656 alpha_pad_noreturn (void)
9657 {
9658 rtx insn, next;
9659
9660 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9661 {
9662 if (!CALL_P (insn)
9663 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9664 continue;
9665
9666 next = next_active_insn (insn);
9667
9668 if (next)
9669 {
9670 rtx pat = PATTERN (next);
9671
9672 if (GET_CODE (pat) == SET
9673 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9674 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9675 emit_insn_after (gen_unop (), insn);
9676 }
9677 }
9678 }
9679 \f
9680 /* Machine dependent reorg pass. */
9681
9682 static void
9683 alpha_reorg (void)
9684 {
9685 /* Workaround for a linker error that triggers when an
9686 exception handler immediatelly follows a noreturn function.
9687
9688 The instruction stream from an object file:
9689
9690 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9691 58: 00 00 ba 27 ldah gp,0(ra)
9692 5c: 00 00 bd 23 lda gp,0(gp)
9693 60: 00 00 7d a7 ldq t12,0(gp)
9694 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9695
9696 was converted in the final link pass to:
9697
9698 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9699 fdb28: 00 00 fe 2f unop
9700 fdb2c: 00 00 fe 2f unop
9701 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9702 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9703
9704 GP load instructions were wrongly cleared by the linker relaxation
9705 pass. This workaround prevents removal of GP loads by inserting
9706 an unop instruction between a noreturn function call and
9707 exception handler prologue. */
9708
9709 if (current_function_has_exception_handlers ())
9710 alpha_pad_noreturn ();
9711
9712 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9713 alpha_handle_trap_shadows ();
9714
9715 /* Due to the number of extra trapb insns, don't bother fixing up
9716 alignment when trap precision is instruction. Moreover, we can
9717 only do our job when sched2 is run. */
9718 if (optimize && !optimize_size
9719 && alpha_tp != ALPHA_TP_INSN
9720 && flag_schedule_insns_after_reload)
9721 {
9722 if (alpha_tune == PROCESSOR_EV4)
9723 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9724 else if (alpha_tune == PROCESSOR_EV5)
9725 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9726 }
9727 }
9728 \f
9729 #if !TARGET_ABI_UNICOSMK
9730
9731 #ifdef HAVE_STAMP_H
9732 #include <stamp.h>
9733 #endif
9734
9735 static void
9736 alpha_file_start (void)
9737 {
9738 #ifdef OBJECT_FORMAT_ELF
9739 /* If emitting dwarf2 debug information, we cannot generate a .file
9740 directive to start the file, as it will conflict with dwarf2out
9741 file numbers. So it's only useful when emitting mdebug output. */
9742 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9743 #endif
9744
9745 default_file_start ();
9746 #ifdef MS_STAMP
9747 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9748 #endif
9749
9750 fputs ("\t.set noreorder\n", asm_out_file);
9751 fputs ("\t.set volatile\n", asm_out_file);
9752 if (!TARGET_ABI_OPEN_VMS)
9753 fputs ("\t.set noat\n", asm_out_file);
9754 if (TARGET_EXPLICIT_RELOCS)
9755 fputs ("\t.set nomacro\n", asm_out_file);
9756 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9757 {
9758 const char *arch;
9759
9760 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9761 arch = "ev6";
9762 else if (TARGET_MAX)
9763 arch = "pca56";
9764 else if (TARGET_BWX)
9765 arch = "ev56";
9766 else if (alpha_cpu == PROCESSOR_EV5)
9767 arch = "ev5";
9768 else
9769 arch = "ev4";
9770
9771 fprintf (asm_out_file, "\t.arch %s\n", arch);
9772 }
9773 }
9774 #endif
9775
9776 #ifdef OBJECT_FORMAT_ELF
9777 /* Since we don't have a .dynbss section, we should not allow global
9778 relocations in the .rodata section. */
9779
9780 static int
9781 alpha_elf_reloc_rw_mask (void)
9782 {
9783 return flag_pic ? 3 : 2;
9784 }
9785
9786 /* Return a section for X. The only special thing we do here is to
9787 honor small data. */
9788
9789 static section *
9790 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9791 unsigned HOST_WIDE_INT align)
9792 {
9793 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9794 /* ??? Consider using mergeable sdata sections. */
9795 return sdata_section;
9796 else
9797 return default_elf_select_rtx_section (mode, x, align);
9798 }
9799
9800 static unsigned int
9801 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9802 {
9803 unsigned int flags = 0;
9804
9805 if (strcmp (name, ".sdata") == 0
9806 || strncmp (name, ".sdata.", 7) == 0
9807 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9808 || strcmp (name, ".sbss") == 0
9809 || strncmp (name, ".sbss.", 6) == 0
9810 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9811 flags = SECTION_SMALL;
9812
9813 flags |= default_section_type_flags (decl, name, reloc);
9814 return flags;
9815 }
9816 #endif /* OBJECT_FORMAT_ELF */
9817 \f
9818 /* Structure to collect function names for final output in link section. */
9819 /* Note that items marked with GTY can't be ifdef'ed out. */
9820
9821 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9822 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9823
9824 struct GTY(()) alpha_links
9825 {
9826 int num;
9827 const char *target;
9828 rtx linkage;
9829 enum links_kind lkind;
9830 enum reloc_kind rkind;
9831 };
9832
9833 struct GTY(()) alpha_funcs
9834 {
9835 int num;
9836 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9837 links;
9838 };
9839
9840 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9841 splay_tree alpha_links_tree;
9842 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9843 splay_tree alpha_funcs_tree;
9844
9845 static GTY(()) int alpha_funcs_num;
9846
9847 #if TARGET_ABI_OPEN_VMS
9848
9849 /* Return the VMS argument type corresponding to MODE. */
9850
9851 enum avms_arg_type
9852 alpha_arg_type (enum machine_mode mode)
9853 {
9854 switch (mode)
9855 {
9856 case SFmode:
9857 return TARGET_FLOAT_VAX ? FF : FS;
9858 case DFmode:
9859 return TARGET_FLOAT_VAX ? FD : FT;
9860 default:
9861 return I64;
9862 }
9863 }
9864
9865 /* Return an rtx for an integer representing the VMS Argument Information
9866 register value. */
9867
9868 rtx
9869 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9870 {
9871 unsigned HOST_WIDE_INT regval = cum.num_args;
9872 int i;
9873
9874 for (i = 0; i < 6; i++)
9875 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9876
9877 return GEN_INT (regval);
9878 }
9879 \f
9880 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9881 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9882 Return a SYMBOL_REF suited to the call instruction. */
9883
9884 rtx
9885 alpha_need_linkage (const char *name, int is_local)
9886 {
9887 splay_tree_node node;
9888 struct alpha_links *al;
9889 const char *target;
9890 tree id;
9891
9892 if (name[0] == '*')
9893 name++;
9894
9895 if (is_local)
9896 {
9897 struct alpha_funcs *cfaf;
9898
9899 if (!alpha_funcs_tree)
9900 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9901 splay_tree_compare_pointers);
9902
9903 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9904
9905 cfaf->links = 0;
9906 cfaf->num = ++alpha_funcs_num;
9907
9908 splay_tree_insert (alpha_funcs_tree,
9909 (splay_tree_key) current_function_decl,
9910 (splay_tree_value) cfaf);
9911 }
9912
9913 if (alpha_links_tree)
9914 {
9915 /* Is this name already defined? */
9916
9917 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9918 if (node)
9919 {
9920 al = (struct alpha_links *) node->value;
9921 if (is_local)
9922 {
9923 /* Defined here but external assumed. */
9924 if (al->lkind == KIND_EXTERN)
9925 al->lkind = KIND_LOCAL;
9926 }
9927 else
9928 {
9929 /* Used here but unused assumed. */
9930 if (al->lkind == KIND_UNUSED)
9931 al->lkind = KIND_LOCAL;
9932 }
9933 return al->linkage;
9934 }
9935 }
9936 else
9937 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9938
9939 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9940 name = ggc_strdup (name);
9941
9942 /* Assume external if no definition. */
9943 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9944
9945 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9946 and find the ultimate alias target like assemble_name. */
9947 id = get_identifier (name);
9948 target = NULL;
9949 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9950 {
9951 id = TREE_CHAIN (id);
9952 target = IDENTIFIER_POINTER (id);
9953 }
9954
9955 al->target = target ? target : name;
9956 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9957
9958 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9959 (splay_tree_value) al);
9960
9961 return al->linkage;
9962 }
9963
9964 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9965 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9966 this is the reference to the linkage pointer value, 0 if this is the
9967 reference to the function entry value. RFLAG is 1 if this a reduced
9968 reference (code address only), 0 if this is a full reference. */
9969
9970 rtx
9971 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9972 {
9973 splay_tree_node cfunnode;
9974 struct alpha_funcs *cfaf;
9975 struct alpha_links *al;
9976 const char *name = XSTR (func, 0);
9977
9978 cfaf = (struct alpha_funcs *) 0;
9979 al = (struct alpha_links *) 0;
9980
9981 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9982 cfaf = (struct alpha_funcs *) cfunnode->value;
9983
9984 if (cfaf->links)
9985 {
9986 splay_tree_node lnode;
9987
9988 /* Is this name already defined? */
9989
9990 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9991 if (lnode)
9992 al = (struct alpha_links *) lnode->value;
9993 }
9994 else
9995 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9996
9997 if (!al)
9998 {
9999 size_t name_len;
10000 size_t buflen;
10001 char *linksym;
10002 splay_tree_node node = 0;
10003 struct alpha_links *anl;
10004
10005 if (name[0] == '*')
10006 name++;
10007
10008 name_len = strlen (name);
10009 linksym = (char *) alloca (name_len + 50);
10010
10011 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
10012 al->num = cfaf->num;
10013
10014 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
10015 if (node)
10016 {
10017 anl = (struct alpha_links *) node->value;
10018 al->lkind = anl->lkind;
10019 name = anl->target;
10020 }
10021
10022 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
10023 buflen = strlen (linksym);
10024
10025 al->linkage = gen_rtx_SYMBOL_REF
10026 (Pmode, ggc_alloc_string (linksym, buflen + 1));
10027
10028 splay_tree_insert (cfaf->links, (splay_tree_key) name,
10029 (splay_tree_value) al);
10030 }
10031
10032 if (rflag)
10033 al->rkind = KIND_CODEADDR;
10034 else
10035 al->rkind = KIND_LINKAGE;
10036
10037 if (lflag)
10038 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
10039 else
10040 return al->linkage;
10041 }
10042
10043 static int
10044 alpha_write_one_linkage (splay_tree_node node, void *data)
10045 {
10046 const char *const name = (const char *) node->key;
10047 struct alpha_links *link = (struct alpha_links *) node->value;
10048 FILE *stream = (FILE *) data;
10049
10050 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
10051 if (link->rkind == KIND_CODEADDR)
10052 {
10053 if (link->lkind == KIND_LOCAL)
10054 {
10055 /* Local and used */
10056 fprintf (stream, "\t.quad %s..en\n", name);
10057 }
10058 else
10059 {
10060 /* External and used, request code address. */
10061 fprintf (stream, "\t.code_address %s\n", name);
10062 }
10063 }
10064 else
10065 {
10066 if (link->lkind == KIND_LOCAL)
10067 {
10068 /* Local and used, build linkage pair. */
10069 fprintf (stream, "\t.quad %s..en\n", name);
10070 fprintf (stream, "\t.quad %s\n", name);
10071 }
10072 else
10073 {
10074 /* External and used, request linkage pair. */
10075 fprintf (stream, "\t.linkage %s\n", name);
10076 }
10077 }
10078
10079 return 0;
10080 }
10081
10082 static void
10083 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
10084 {
10085 splay_tree_node node;
10086 struct alpha_funcs *func;
10087
10088 fprintf (stream, "\t.link\n");
10089 fprintf (stream, "\t.align 3\n");
10090 in_section = NULL;
10091
10092 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
10093 func = (struct alpha_funcs *) node->value;
10094
10095 fputs ("\t.name ", stream);
10096 assemble_name (stream, funname);
10097 fputs ("..na\n", stream);
10098 ASM_OUTPUT_LABEL (stream, funname);
10099 fprintf (stream, "\t.pdesc ");
10100 assemble_name (stream, funname);
10101 fprintf (stream, "..en,%s\n",
10102 alpha_procedure_type == PT_STACK ? "stack"
10103 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
10104
10105 if (func->links)
10106 {
10107 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
10108 /* splay_tree_delete (func->links); */
10109 }
10110 }
10111
10112 /* Switch to an arbitrary section NAME with attributes as specified
10113 by FLAGS. ALIGN specifies any known alignment requirements for
10114 the section; 0 if the default should be used. */
10115
10116 static void
10117 vms_asm_named_section (const char *name, unsigned int flags,
10118 tree decl ATTRIBUTE_UNUSED)
10119 {
10120 fputc ('\n', asm_out_file);
10121 fprintf (asm_out_file, ".section\t%s", name);
10122
10123 if (flags & SECTION_DEBUG)
10124 fprintf (asm_out_file, ",NOWRT");
10125
10126 fputc ('\n', asm_out_file);
10127 }
10128
10129 /* Record an element in the table of global constructors. SYMBOL is
10130 a SYMBOL_REF of the function to be called; PRIORITY is a number
10131 between 0 and MAX_INIT_PRIORITY.
10132
10133 Differs from default_ctors_section_asm_out_constructor in that the
10134 width of the .ctors entry is always 64 bits, rather than the 32 bits
10135 used by a normal pointer. */
10136
10137 static void
10138 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10139 {
10140 switch_to_section (ctors_section);
10141 assemble_align (BITS_PER_WORD);
10142 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10143 }
10144
10145 static void
10146 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
10147 {
10148 switch_to_section (dtors_section);
10149 assemble_align (BITS_PER_WORD);
10150 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
10151 }
10152 #else
10153
10154 rtx
10155 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
10156 int is_local ATTRIBUTE_UNUSED)
10157 {
10158 return NULL_RTX;
10159 }
10160
10161 rtx
10162 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
10163 tree cfundecl ATTRIBUTE_UNUSED,
10164 int lflag ATTRIBUTE_UNUSED,
10165 int rflag ATTRIBUTE_UNUSED)
10166 {
10167 return NULL_RTX;
10168 }
10169
10170 #endif /* TARGET_ABI_OPEN_VMS */
10171 \f
10172 #if TARGET_ABI_UNICOSMK
10173
10174 /* This evaluates to true if we do not know how to pass TYPE solely in
10175 registers. This is the case for all arguments that do not fit in two
10176 registers. */
10177
10178 static bool
10179 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
10180 {
10181 if (type == NULL)
10182 return false;
10183
10184 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10185 return true;
10186 if (TREE_ADDRESSABLE (type))
10187 return true;
10188
10189 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
10190 }
10191
10192 /* Define the offset between two registers, one to be eliminated, and the
10193 other its replacement, at the start of a routine. */
10194
10195 int
10196 unicosmk_initial_elimination_offset (int from, int to)
10197 {
10198 int fixed_size;
10199
10200 fixed_size = alpha_sa_size();
10201 if (fixed_size != 0)
10202 fixed_size += 48;
10203
10204 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10205 return -fixed_size;
10206 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
10207 return 0;
10208 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10209 return (ALPHA_ROUND (crtl->outgoing_args_size)
10210 + ALPHA_ROUND (get_frame_size()));
10211 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
10212 return (ALPHA_ROUND (fixed_size)
10213 + ALPHA_ROUND (get_frame_size()
10214 + crtl->outgoing_args_size));
10215 else
10216 gcc_unreachable ();
10217 }
10218
10219 /* Output the module name for .ident and .end directives. We have to strip
10220 directories and add make sure that the module name starts with a letter
10221 or '$'. */
10222
10223 static void
10224 unicosmk_output_module_name (FILE *file)
10225 {
10226 const char *name = lbasename (main_input_filename);
10227 unsigned len = strlen (name);
10228 char *clean_name = alloca (len + 2);
10229 char *ptr = clean_name;
10230
10231 /* CAM only accepts module names that start with a letter or '$'. We
10232 prefix the module name with a '$' if necessary. */
10233
10234 if (!ISALPHA (*name))
10235 *ptr++ = '$';
10236 memcpy (ptr, name, len + 1);
10237 clean_symbol_name (clean_name);
10238 fputs (clean_name, file);
10239 }
10240
10241 /* Output the definition of a common variable. */
10242
10243 void
10244 unicosmk_output_common (FILE *file, const char *name, int size, int align)
10245 {
10246 tree name_tree;
10247 printf ("T3E__: common %s\n", name);
10248
10249 in_section = NULL;
10250 fputs("\t.endp\n\n\t.psect ", file);
10251 assemble_name(file, name);
10252 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
10253 fprintf(file, "\t.byte\t0:%d\n", size);
10254
10255 /* Mark the symbol as defined in this module. */
10256 name_tree = get_identifier (name);
10257 TREE_ASM_WRITTEN (name_tree) = 1;
10258 }
10259
10260 #define SECTION_PUBLIC SECTION_MACH_DEP
10261 #define SECTION_MAIN (SECTION_PUBLIC << 1)
10262 static int current_section_align;
10263
10264 /* A get_unnamed_section callback for switching to the text section. */
10265
10266 static void
10267 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10268 {
10269 static int count = 0;
10270 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
10271 }
10272
10273 /* A get_unnamed_section callback for switching to the data section. */
10274
10275 static void
10276 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
10277 {
10278 static int count = 1;
10279 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
10280 }
10281
10282 /* Implement TARGET_ASM_INIT_SECTIONS.
10283
10284 The Cray assembler is really weird with respect to sections. It has only
10285 named sections and you can't reopen a section once it has been closed.
10286 This means that we have to generate unique names whenever we want to
10287 reenter the text or the data section. */
10288
10289 static void
10290 unicosmk_init_sections (void)
10291 {
10292 text_section = get_unnamed_section (SECTION_CODE,
10293 unicosmk_output_text_section_asm_op,
10294 NULL);
10295 data_section = get_unnamed_section (SECTION_WRITE,
10296 unicosmk_output_data_section_asm_op,
10297 NULL);
10298 readonly_data_section = data_section;
10299 }
10300
10301 static unsigned int
10302 unicosmk_section_type_flags (tree decl, const char *name,
10303 int reloc ATTRIBUTE_UNUSED)
10304 {
10305 unsigned int flags = default_section_type_flags (decl, name, reloc);
10306
10307 if (!decl)
10308 return flags;
10309
10310 if (TREE_CODE (decl) == FUNCTION_DECL)
10311 {
10312 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10313 if (align_functions_log > current_section_align)
10314 current_section_align = align_functions_log;
10315
10316 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10317 flags |= SECTION_MAIN;
10318 }
10319 else
10320 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10321
10322 if (TREE_PUBLIC (decl))
10323 flags |= SECTION_PUBLIC;
10324
10325 return flags;
10326 }
10327
10328 /* Generate a section name for decl and associate it with the
10329 declaration. */
10330
10331 static void
10332 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10333 {
10334 const char *name;
10335 int len;
10336
10337 gcc_assert (decl);
10338
10339 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10340 name = default_strip_name_encoding (name);
10341 len = strlen (name);
10342
10343 if (TREE_CODE (decl) == FUNCTION_DECL)
10344 {
10345 char *string;
10346
10347 /* It is essential that we prefix the section name here because
10348 otherwise the section names generated for constructors and
10349 destructors confuse collect2. */
10350
10351 string = alloca (len + 6);
10352 sprintf (string, "code@%s", name);
10353 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10354 }
10355 else if (TREE_PUBLIC (decl))
10356 DECL_SECTION_NAME (decl) = build_string (len, name);
10357 else
10358 {
10359 char *string;
10360
10361 string = alloca (len + 6);
10362 sprintf (string, "data@%s", name);
10363 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10364 }
10365 }
10366
10367 /* Switch to an arbitrary section NAME with attributes as specified
10368 by FLAGS. ALIGN specifies any known alignment requirements for
10369 the section; 0 if the default should be used. */
10370
10371 static void
10372 unicosmk_asm_named_section (const char *name, unsigned int flags,
10373 tree decl ATTRIBUTE_UNUSED)
10374 {
10375 const char *kind;
10376
10377 /* Close the previous section. */
10378
10379 fputs ("\t.endp\n\n", asm_out_file);
10380
10381 /* Find out what kind of section we are opening. */
10382
10383 if (flags & SECTION_MAIN)
10384 fputs ("\t.start\tmain\n", asm_out_file);
10385
10386 if (flags & SECTION_CODE)
10387 kind = "code";
10388 else if (flags & SECTION_PUBLIC)
10389 kind = "common";
10390 else
10391 kind = "data";
10392
10393 if (current_section_align != 0)
10394 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10395 current_section_align, kind);
10396 else
10397 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10398 }
10399
10400 static void
10401 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10402 {
10403 if (DECL_P (decl)
10404 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10405 unicosmk_unique_section (decl, 0);
10406 }
10407
10408 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10409 in code sections because .align fill unused space with zeroes. */
10410
10411 void
10412 unicosmk_output_align (FILE *file, int align)
10413 {
10414 if (inside_function)
10415 fprintf (file, "\tgcc@code@align\t%d\n", align);
10416 else
10417 fprintf (file, "\t.align\t%d\n", align);
10418 }
10419
10420 /* Add a case vector to the current function's list of deferred case
10421 vectors. Case vectors have to be put into a separate section because CAM
10422 does not allow data definitions in code sections. */
10423
10424 void
10425 unicosmk_defer_case_vector (rtx lab, rtx vec)
10426 {
10427 struct machine_function *machine = cfun->machine;
10428
10429 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10430 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10431 machine->addr_list);
10432 }
10433
10434 /* Output a case vector. */
10435
10436 static void
10437 unicosmk_output_addr_vec (FILE *file, rtx vec)
10438 {
10439 rtx lab = XEXP (vec, 0);
10440 rtx body = XEXP (vec, 1);
10441 int vlen = XVECLEN (body, 0);
10442 int idx;
10443
10444 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10445
10446 for (idx = 0; idx < vlen; idx++)
10447 {
10448 ASM_OUTPUT_ADDR_VEC_ELT
10449 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10450 }
10451 }
10452
10453 /* Output current function's deferred case vectors. */
10454
10455 static void
10456 unicosmk_output_deferred_case_vectors (FILE *file)
10457 {
10458 struct machine_function *machine = cfun->machine;
10459 rtx t;
10460
10461 if (machine->addr_list == NULL_RTX)
10462 return;
10463
10464 switch_to_section (data_section);
10465 for (t = machine->addr_list; t; t = XEXP (t, 1))
10466 unicosmk_output_addr_vec (file, XEXP (t, 0));
10467 }
10468
10469 /* Generate the name of the SSIB section for the current function. */
10470
10471 #define SSIB_PREFIX "__SSIB_"
10472 #define SSIB_PREFIX_LEN 7
10473
10474 static const char *
10475 unicosmk_ssib_name (void)
10476 {
10477 /* This is ok since CAM won't be able to deal with names longer than that
10478 anyway. */
10479
10480 static char name[256];
10481
10482 rtx x;
10483 const char *fnname;
10484 int len;
10485
10486 x = DECL_RTL (cfun->decl);
10487 gcc_assert (MEM_P (x));
10488 x = XEXP (x, 0);
10489 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10490 fnname = XSTR (x, 0);
10491
10492 len = strlen (fnname);
10493 if (len + SSIB_PREFIX_LEN > 255)
10494 len = 255 - SSIB_PREFIX_LEN;
10495
10496 strcpy (name, SSIB_PREFIX);
10497 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10498 name[len + SSIB_PREFIX_LEN] = 0;
10499
10500 return name;
10501 }
10502
10503 /* Set up the dynamic subprogram information block (DSIB) and update the
10504 frame pointer register ($15) for subroutines which have a frame. If the
10505 subroutine doesn't have a frame, simply increment $15. */
10506
10507 static void
10508 unicosmk_gen_dsib (unsigned long *imaskP)
10509 {
10510 if (alpha_procedure_type == PT_STACK)
10511 {
10512 const char *ssib_name;
10513 rtx mem;
10514
10515 /* Allocate 64 bytes for the DSIB. */
10516
10517 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10518 GEN_INT (-64))));
10519 emit_insn (gen_blockage ());
10520
10521 /* Save the return address. */
10522
10523 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10524 set_mem_alias_set (mem, alpha_sr_alias_set);
10525 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10526 (*imaskP) &= ~(1UL << REG_RA);
10527
10528 /* Save the old frame pointer. */
10529
10530 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10531 set_mem_alias_set (mem, alpha_sr_alias_set);
10532 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10533 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10534
10535 emit_insn (gen_blockage ());
10536
10537 /* Store the SSIB pointer. */
10538
10539 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10540 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10541 set_mem_alias_set (mem, alpha_sr_alias_set);
10542
10543 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10544 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10545 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10546
10547 /* Save the CIW index. */
10548
10549 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10550 set_mem_alias_set (mem, alpha_sr_alias_set);
10551 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10552
10553 emit_insn (gen_blockage ());
10554
10555 /* Set the new frame pointer. */
10556 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10557 stack_pointer_rtx, GEN_INT (64))));
10558 }
10559 else
10560 {
10561 /* Increment the frame pointer register to indicate that we do not
10562 have a frame. */
10563 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10564 hard_frame_pointer_rtx, const1_rtx));
10565 }
10566 }
10567
10568 /* Output the static subroutine information block for the current
10569 function. */
10570
10571 static void
10572 unicosmk_output_ssib (FILE *file, const char *fnname)
10573 {
10574 int len;
10575 int i;
10576 rtx x;
10577 rtx ciw;
10578 struct machine_function *machine = cfun->machine;
10579
10580 in_section = NULL;
10581 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10582 unicosmk_ssib_name ());
10583
10584 /* Some required stuff and the function name length. */
10585
10586 len = strlen (fnname);
10587 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10588
10589 /* Saved registers
10590 ??? We don't do that yet. */
10591
10592 fputs ("\t.quad\t0\n", file);
10593
10594 /* Function address. */
10595
10596 fputs ("\t.quad\t", file);
10597 assemble_name (file, fnname);
10598 putc ('\n', file);
10599
10600 fputs ("\t.quad\t0\n", file);
10601 fputs ("\t.quad\t0\n", file);
10602
10603 /* Function name.
10604 ??? We do it the same way Cray CC does it but this could be
10605 simplified. */
10606
10607 for( i = 0; i < len; i++ )
10608 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10609 if( (len % 8) == 0 )
10610 fputs ("\t.quad\t0\n", file);
10611 else
10612 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10613
10614 /* All call information words used in the function. */
10615
10616 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10617 {
10618 ciw = XEXP (x, 0);
10619 #if HOST_BITS_PER_WIDE_INT == 32
10620 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10621 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10622 #else
10623 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10624 #endif
10625 }
10626 }
10627
10628 /* Add a call information word (CIW) to the list of the current function's
10629 CIWs and return its index.
10630
10631 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10632
10633 rtx
10634 unicosmk_add_call_info_word (rtx x)
10635 {
10636 rtx node;
10637 struct machine_function *machine = cfun->machine;
10638
10639 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10640 if (machine->first_ciw == NULL_RTX)
10641 machine->first_ciw = node;
10642 else
10643 XEXP (machine->last_ciw, 1) = node;
10644
10645 machine->last_ciw = node;
10646 ++machine->ciw_count;
10647
10648 return GEN_INT (machine->ciw_count
10649 + strlen (current_function_name ())/8 + 5);
10650 }
10651
10652 /* The Cray assembler doesn't accept extern declarations for symbols which
10653 are defined in the same file. We have to keep track of all global
10654 symbols which are referenced and/or defined in a source file and output
10655 extern declarations for those which are referenced but not defined at
10656 the end of file. */
10657
10658 /* List of identifiers for which an extern declaration might have to be
10659 emitted. */
10660 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10661
10662 struct unicosmk_extern_list
10663 {
10664 struct unicosmk_extern_list *next;
10665 const char *name;
10666 };
10667
10668 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10669
10670 /* Output extern declarations which are required for every asm file. */
10671
10672 static void
10673 unicosmk_output_default_externs (FILE *file)
10674 {
10675 static const char *const externs[] =
10676 { "__T3E_MISMATCH" };
10677
10678 int i;
10679 int n;
10680
10681 n = ARRAY_SIZE (externs);
10682
10683 for (i = 0; i < n; i++)
10684 fprintf (file, "\t.extern\t%s\n", externs[i]);
10685 }
10686
10687 /* Output extern declarations for global symbols which are have been
10688 referenced but not defined. */
10689
10690 static void
10691 unicosmk_output_externs (FILE *file)
10692 {
10693 struct unicosmk_extern_list *p;
10694 const char *real_name;
10695 int len;
10696 tree name_tree;
10697
10698 len = strlen (user_label_prefix);
10699 for (p = unicosmk_extern_head; p != 0; p = p->next)
10700 {
10701 /* We have to strip the encoding and possibly remove user_label_prefix
10702 from the identifier in order to handle -fleading-underscore and
10703 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10704 real_name = default_strip_name_encoding (p->name);
10705 if (len && p->name[0] == '*'
10706 && !memcmp (real_name, user_label_prefix, len))
10707 real_name += len;
10708
10709 name_tree = get_identifier (real_name);
10710 if (! TREE_ASM_WRITTEN (name_tree))
10711 {
10712 TREE_ASM_WRITTEN (name_tree) = 1;
10713 fputs ("\t.extern\t", file);
10714 assemble_name (file, p->name);
10715 putc ('\n', file);
10716 }
10717 }
10718 }
10719
10720 /* Record an extern. */
10721
10722 void
10723 unicosmk_add_extern (const char *name)
10724 {
10725 struct unicosmk_extern_list *p;
10726
10727 p = (struct unicosmk_extern_list *)
10728 xmalloc (sizeof (struct unicosmk_extern_list));
10729 p->next = unicosmk_extern_head;
10730 p->name = name;
10731 unicosmk_extern_head = p;
10732 }
10733
10734 /* The Cray assembler generates incorrect code if identifiers which
10735 conflict with register names are used as instruction operands. We have
10736 to replace such identifiers with DEX expressions. */
10737
10738 /* Structure to collect identifiers which have been replaced by DEX
10739 expressions. */
10740 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10741
10742 struct unicosmk_dex {
10743 struct unicosmk_dex *next;
10744 const char *name;
10745 };
10746
10747 /* List of identifiers which have been replaced by DEX expressions. The DEX
10748 number is determined by the position in the list. */
10749
10750 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10751
10752 /* The number of elements in the DEX list. */
10753
10754 static int unicosmk_dex_count = 0;
10755
10756 /* Check if NAME must be replaced by a DEX expression. */
10757
10758 static int
10759 unicosmk_special_name (const char *name)
10760 {
10761 if (name[0] == '*')
10762 ++name;
10763
10764 if (name[0] == '$')
10765 ++name;
10766
10767 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10768 return 0;
10769
10770 switch (name[1])
10771 {
10772 case '1': case '2':
10773 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10774
10775 case '3':
10776 return (name[2] == '\0'
10777 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10778
10779 default:
10780 return (ISDIGIT (name[1]) && name[2] == '\0');
10781 }
10782 }
10783
10784 /* Return the DEX number if X must be replaced by a DEX expression and 0
10785 otherwise. */
10786
10787 static int
10788 unicosmk_need_dex (rtx x)
10789 {
10790 struct unicosmk_dex *dex;
10791 const char *name;
10792 int i;
10793
10794 if (GET_CODE (x) != SYMBOL_REF)
10795 return 0;
10796
10797 name = XSTR (x,0);
10798 if (! unicosmk_special_name (name))
10799 return 0;
10800
10801 i = unicosmk_dex_count;
10802 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10803 {
10804 if (! strcmp (name, dex->name))
10805 return i;
10806 --i;
10807 }
10808
10809 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10810 dex->name = name;
10811 dex->next = unicosmk_dex_list;
10812 unicosmk_dex_list = dex;
10813
10814 ++unicosmk_dex_count;
10815 return unicosmk_dex_count;
10816 }
10817
10818 /* Output the DEX definitions for this file. */
10819
10820 static void
10821 unicosmk_output_dex (FILE *file)
10822 {
10823 struct unicosmk_dex *dex;
10824 int i;
10825
10826 if (unicosmk_dex_list == NULL)
10827 return;
10828
10829 fprintf (file, "\t.dexstart\n");
10830
10831 i = unicosmk_dex_count;
10832 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10833 {
10834 fprintf (file, "\tDEX (%d) = ", i);
10835 assemble_name (file, dex->name);
10836 putc ('\n', file);
10837 --i;
10838 }
10839
10840 fprintf (file, "\t.dexend\n");
10841 }
10842
10843 /* Output text that to appear at the beginning of an assembler file. */
10844
10845 static void
10846 unicosmk_file_start (void)
10847 {
10848 int i;
10849
10850 fputs ("\t.ident\t", asm_out_file);
10851 unicosmk_output_module_name (asm_out_file);
10852 fputs ("\n\n", asm_out_file);
10853
10854 /* The Unicos/Mk assembler uses different register names. Instead of trying
10855 to support them, we simply use micro definitions. */
10856
10857 /* CAM has different register names: rN for the integer register N and fN
10858 for the floating-point register N. Instead of trying to use these in
10859 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10860 register. */
10861
10862 for (i = 0; i < 32; ++i)
10863 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10864
10865 for (i = 0; i < 32; ++i)
10866 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10867
10868 putc ('\n', asm_out_file);
10869
10870 /* The .align directive fill unused space with zeroes which does not work
10871 in code sections. We define the macro 'gcc@code@align' which uses nops
10872 instead. Note that it assumes that code sections always have the
10873 biggest possible alignment since . refers to the current offset from
10874 the beginning of the section. */
10875
10876 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10877 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10878 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10879 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10880 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10881 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10882 fputs ("\t.endr\n", asm_out_file);
10883 fputs ("\t.endif\n", asm_out_file);
10884 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10885
10886 /* Output extern declarations which should always be visible. */
10887 unicosmk_output_default_externs (asm_out_file);
10888
10889 /* Open a dummy section. We always need to be inside a section for the
10890 section-switching code to work correctly.
10891 ??? This should be a module id or something like that. I still have to
10892 figure out what the rules for those are. */
10893 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10894 }
10895
10896 /* Output text to appear at the end of an assembler file. This includes all
10897 pending extern declarations and DEX expressions. */
10898
10899 static void
10900 unicosmk_file_end (void)
10901 {
10902 fputs ("\t.endp\n\n", asm_out_file);
10903
10904 /* Output all pending externs. */
10905
10906 unicosmk_output_externs (asm_out_file);
10907
10908 /* Output dex definitions used for functions whose names conflict with
10909 register names. */
10910
10911 unicosmk_output_dex (asm_out_file);
10912
10913 fputs ("\t.end\t", asm_out_file);
10914 unicosmk_output_module_name (asm_out_file);
10915 putc ('\n', asm_out_file);
10916 }
10917
10918 #else
10919
10920 static void
10921 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10922 {}
10923
10924 static void
10925 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10926 {}
10927
10928 static void
10929 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10930 const char * fnname ATTRIBUTE_UNUSED)
10931 {}
10932
10933 rtx
10934 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10935 {
10936 return NULL_RTX;
10937 }
10938
10939 static int
10940 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10941 {
10942 return 0;
10943 }
10944
10945 #endif /* TARGET_ABI_UNICOSMK */
10946
10947 static void
10948 alpha_init_libfuncs (void)
10949 {
10950 if (TARGET_ABI_UNICOSMK)
10951 {
10952 /* Prevent gcc from generating calls to __divsi3. */
10953 set_optab_libfunc (sdiv_optab, SImode, 0);
10954 set_optab_libfunc (udiv_optab, SImode, 0);
10955
10956 /* Use the functions provided by the system library
10957 for DImode integer division. */
10958 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10959 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10960 }
10961 else if (TARGET_ABI_OPEN_VMS)
10962 {
10963 /* Use the VMS runtime library functions for division and
10964 remainder. */
10965 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10966 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10967 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10968 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10969 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10970 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10971 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10972 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10973 abort_libfunc = init_one_libfunc ("decc$abort");
10974 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10975 #ifdef MEM_LIBFUNCS_INIT
10976 MEM_LIBFUNCS_INIT;
10977 #endif
10978 }
10979 }
10980
10981 \f
10982 /* Initialize the GCC target structure. */
10983 #if TARGET_ABI_OPEN_VMS
10984 # undef TARGET_ATTRIBUTE_TABLE
10985 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10986 #endif
10987
10988 #undef TARGET_IN_SMALL_DATA_P
10989 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10990
10991 #if TARGET_ABI_UNICOSMK
10992 # undef TARGET_INSERT_ATTRIBUTES
10993 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10994 # undef TARGET_SECTION_TYPE_FLAGS
10995 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10996 # undef TARGET_ASM_UNIQUE_SECTION
10997 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10998 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10999 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
11000 # undef TARGET_ASM_GLOBALIZE_LABEL
11001 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
11002 # undef TARGET_MUST_PASS_IN_STACK
11003 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
11004 #endif
11005
11006 #undef TARGET_ASM_ALIGNED_HI_OP
11007 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
11008 #undef TARGET_ASM_ALIGNED_DI_OP
11009 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
11010
11011 /* Default unaligned ops are provided for ELF systems. To get unaligned
11012 data for non-ELF systems, we have to turn off auto alignment. */
11013 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
11014 #undef TARGET_ASM_UNALIGNED_HI_OP
11015 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
11016 #undef TARGET_ASM_UNALIGNED_SI_OP
11017 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
11018 #undef TARGET_ASM_UNALIGNED_DI_OP
11019 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
11020 #endif
11021
11022 #ifdef OBJECT_FORMAT_ELF
11023 #undef TARGET_ASM_RELOC_RW_MASK
11024 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
11025 #undef TARGET_ASM_SELECT_RTX_SECTION
11026 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
11027 #undef TARGET_SECTION_TYPE_FLAGS
11028 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
11029 #endif
11030
11031 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
11032 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
11033
11034 #undef TARGET_INIT_LIBFUNCS
11035 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
11036
11037 #undef TARGET_LEGITIMIZE_ADDRESS
11038 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
11039
11040 #if TARGET_ABI_UNICOSMK
11041 #undef TARGET_ASM_FILE_START
11042 #define TARGET_ASM_FILE_START unicosmk_file_start
11043 #undef TARGET_ASM_FILE_END
11044 #define TARGET_ASM_FILE_END unicosmk_file_end
11045 #else
11046 #undef TARGET_ASM_FILE_START
11047 #define TARGET_ASM_FILE_START alpha_file_start
11048 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
11049 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
11050 #endif
11051
11052 #undef TARGET_SCHED_ADJUST_COST
11053 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
11054 #undef TARGET_SCHED_ISSUE_RATE
11055 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
11056 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
11057 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
11058 alpha_multipass_dfa_lookahead
11059
11060 #undef TARGET_HAVE_TLS
11061 #define TARGET_HAVE_TLS HAVE_AS_TLS
11062
11063 #undef TARGET_INIT_BUILTINS
11064 #define TARGET_INIT_BUILTINS alpha_init_builtins
11065 #undef TARGET_EXPAND_BUILTIN
11066 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
11067 #undef TARGET_FOLD_BUILTIN
11068 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
11069
11070 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
11071 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
11072 #undef TARGET_CANNOT_COPY_INSN_P
11073 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
11074 #undef TARGET_CANNOT_FORCE_CONST_MEM
11075 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
11076
11077 #if TARGET_ABI_OSF
11078 #undef TARGET_ASM_OUTPUT_MI_THUNK
11079 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
11080 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
11081 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
11082 #undef TARGET_STDARG_OPTIMIZE_HOOK
11083 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
11084 #endif
11085
11086 #undef TARGET_RTX_COSTS
11087 #define TARGET_RTX_COSTS alpha_rtx_costs
11088 #undef TARGET_ADDRESS_COST
11089 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
11090
11091 #undef TARGET_MACHINE_DEPENDENT_REORG
11092 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
11093
11094 #undef TARGET_PROMOTE_FUNCTION_MODE
11095 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
11096 #undef TARGET_PROMOTE_PROTOTYPES
11097 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
11098 #undef TARGET_RETURN_IN_MEMORY
11099 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
11100 #undef TARGET_PASS_BY_REFERENCE
11101 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
11102 #undef TARGET_SETUP_INCOMING_VARARGS
11103 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
11104 #undef TARGET_STRICT_ARGUMENT_NAMING
11105 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
11106 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
11107 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
11108 #undef TARGET_SPLIT_COMPLEX_ARG
11109 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
11110 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
11111 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
11112 #undef TARGET_ARG_PARTIAL_BYTES
11113 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
11114
11115 #undef TARGET_SECONDARY_RELOAD
11116 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
11117
11118 #undef TARGET_SCALAR_MODE_SUPPORTED_P
11119 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
11120 #undef TARGET_VECTOR_MODE_SUPPORTED_P
11121 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
11122
11123 #undef TARGET_BUILD_BUILTIN_VA_LIST
11124 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
11125
11126 #undef TARGET_EXPAND_BUILTIN_VA_START
11127 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
11128
11129 /* The Alpha architecture does not require sequential consistency. See
11130 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
11131 for an example of how it can be violated in practice. */
11132 #undef TARGET_RELAXED_ORDERING
11133 #define TARGET_RELAXED_ORDERING true
11134
11135 #undef TARGET_DEFAULT_TARGET_FLAGS
11136 #define TARGET_DEFAULT_TARGET_FLAGS \
11137 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
11138 #undef TARGET_HANDLE_OPTION
11139 #define TARGET_HANDLE_OPTION alpha_handle_option
11140
11141 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
11142 #undef TARGET_MANGLE_TYPE
11143 #define TARGET_MANGLE_TYPE alpha_mangle_type
11144 #endif
11145
11146 #undef TARGET_LEGITIMATE_ADDRESS_P
11147 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
11148
11149 struct gcc_target targetm = TARGET_INITIALIZER;
11150
11151 \f
11152 #include "gt-alpha.h"