alpha: Remove all big-endian code.
[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "diagnostic-core.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include "splay-tree.h"
53 #include "cfglayout.h"
54 #include "gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59 #include "libfuncs.h"
60 #include "opts.h"
61
62 /* Specify which cpu to schedule for. */
63 enum processor_type alpha_tune;
64
65 /* Which cpu we're generating code for. */
66 enum processor_type alpha_cpu;
67
68 static const char * const alpha_cpu_name[] =
69 {
70 "ev4", "ev5", "ev6"
71 };
72
73 /* Specify how accurate floating-point traps need to be. */
74
75 enum alpha_trap_precision alpha_tp;
76
77 /* Specify the floating-point rounding mode. */
78
79 enum alpha_fp_rounding_mode alpha_fprm;
80
81 /* Specify which things cause traps. */
82
83 enum alpha_fp_trap_mode alpha_fptm;
84
85 /* Nonzero if inside of a function, because the Alpha asm can't
86 handle .files inside of functions. */
87
88 static int inside_function = FALSE;
89
90 /* The number of cycles of latency we should assume on memory reads. */
91
92 int alpha_memory_latency = 3;
93
94 /* Whether the function needs the GP. */
95
96 static int alpha_function_needs_gp;
97
98 /* The alias set for prologue/epilogue register save/restore. */
99
100 static GTY(()) alias_set_type alpha_sr_alias_set;
101
102 /* The assembler name of the current function. */
103
104 static const char *alpha_fnname;
105
106 /* The next explicit relocation sequence number. */
107 extern GTY(()) int alpha_next_sequence_number;
108 int alpha_next_sequence_number = 1;
109
110 /* The literal and gpdisp sequence numbers for this insn, as printed
111 by %# and %* respectively. */
112 extern GTY(()) int alpha_this_literal_sequence_number;
113 extern GTY(()) int alpha_this_gpdisp_sequence_number;
114 int alpha_this_literal_sequence_number;
115 int alpha_this_gpdisp_sequence_number;
116
117 /* Costs of various operations on the different architectures. */
118
119 struct alpha_rtx_cost_data
120 {
121 unsigned char fp_add;
122 unsigned char fp_mult;
123 unsigned char fp_div_sf;
124 unsigned char fp_div_df;
125 unsigned char int_mult_si;
126 unsigned char int_mult_di;
127 unsigned char int_shift;
128 unsigned char int_cmov;
129 unsigned short int_div;
130 };
131
132 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
133 {
134 { /* EV4 */
135 COSTS_N_INSNS (6), /* fp_add */
136 COSTS_N_INSNS (6), /* fp_mult */
137 COSTS_N_INSNS (34), /* fp_div_sf */
138 COSTS_N_INSNS (63), /* fp_div_df */
139 COSTS_N_INSNS (23), /* int_mult_si */
140 COSTS_N_INSNS (23), /* int_mult_di */
141 COSTS_N_INSNS (2), /* int_shift */
142 COSTS_N_INSNS (2), /* int_cmov */
143 COSTS_N_INSNS (97), /* int_div */
144 },
145 { /* EV5 */
146 COSTS_N_INSNS (4), /* fp_add */
147 COSTS_N_INSNS (4), /* fp_mult */
148 COSTS_N_INSNS (15), /* fp_div_sf */
149 COSTS_N_INSNS (22), /* fp_div_df */
150 COSTS_N_INSNS (8), /* int_mult_si */
151 COSTS_N_INSNS (12), /* int_mult_di */
152 COSTS_N_INSNS (1) + 1, /* int_shift */
153 COSTS_N_INSNS (1), /* int_cmov */
154 COSTS_N_INSNS (83), /* int_div */
155 },
156 { /* EV6 */
157 COSTS_N_INSNS (4), /* fp_add */
158 COSTS_N_INSNS (4), /* fp_mult */
159 COSTS_N_INSNS (12), /* fp_div_sf */
160 COSTS_N_INSNS (15), /* fp_div_df */
161 COSTS_N_INSNS (7), /* int_mult_si */
162 COSTS_N_INSNS (7), /* int_mult_di */
163 COSTS_N_INSNS (1), /* int_shift */
164 COSTS_N_INSNS (2), /* int_cmov */
165 COSTS_N_INSNS (86), /* int_div */
166 },
167 };
168
169 /* Similar but tuned for code size instead of execution latency. The
170 extra +N is fractional cost tuning based on latency. It's used to
171 encourage use of cheaper insns like shift, but only if there's just
172 one of them. */
173
174 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
175 {
176 COSTS_N_INSNS (1), /* fp_add */
177 COSTS_N_INSNS (1), /* fp_mult */
178 COSTS_N_INSNS (1), /* fp_div_sf */
179 COSTS_N_INSNS (1) + 1, /* fp_div_df */
180 COSTS_N_INSNS (1) + 1, /* int_mult_si */
181 COSTS_N_INSNS (1) + 2, /* int_mult_di */
182 COSTS_N_INSNS (1), /* int_shift */
183 COSTS_N_INSNS (1), /* int_cmov */
184 COSTS_N_INSNS (6), /* int_div */
185 };
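/* As a rough editorial illustration of the fractional tuning above: with
   int_mult_di costed at COSTS_N_INSNS (1) + 2 and int_shift at
   COSTS_N_INSNS (1), a multiply that can be replaced by a single shift
   still loses to the shift when optimizing for size, while one that
   would need several shift/add insns does not, since each synthesized
   insn adds a full COSTS_N_INSNS (1).  */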
186
187 /* Get the number of args of a function in one of two ways. */
188 #if TARGET_ABI_OPEN_VMS
189 #define NUM_ARGS crtl->args.info.num_args
190 #else
191 #define NUM_ARGS crtl->args.info
192 #endif
193
194 #define REG_PV 27
195 #define REG_RA 26
196
197 /* Declarations of static functions. */
198 static struct machine_function *alpha_init_machine_status (void);
199 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
200
201 #if TARGET_ABI_OPEN_VMS
202 static void alpha_write_linkage (FILE *, const char *, tree);
203 static bool vms_valid_pointer_mode (enum machine_mode);
204 #endif
205 \f
206 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
207 static const struct default_options alpha_option_optimization_table[] =
208 {
209 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
210 { OPT_LEVELS_NONE, 0, NULL, 0 }
211 };
212
213 /* Implement TARGET_HANDLE_OPTION. */
214
215 static bool
216 alpha_handle_option (struct gcc_options *opts,
217 struct gcc_options *opts_set ATTRIBUTE_UNUSED,
218 const struct cl_decoded_option *decoded,
219 location_t loc)
220 {
221 size_t code = decoded->opt_index;
222 const char *arg = decoded->arg;
223 int value = decoded->value;
224
225 switch (code)
226 {
227 case OPT_mfp_regs:
228 if (value == 0)
229 opts->x_target_flags |= MASK_SOFT_FP;
230 break;
231
232 case OPT_mieee:
233 case OPT_mieee_with_inexact:
234 opts->x_target_flags |= MASK_IEEE_CONFORMANT;
235 break;
236
237 case OPT_mtls_size_:
238 if (value != 16 && value != 32 && value != 64)
239 error_at (loc, "bad value %qs for -mtls-size switch", arg);
240 break;
241 }
242
243 return true;
244 }
245
246 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
247 /* Implement TARGET_MANGLE_TYPE. */
248
249 static const char *
250 alpha_mangle_type (const_tree type)
251 {
252 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
253 && TARGET_LONG_DOUBLE_128)
254 return "g";
255
256 /* For all other types, use normal C++ mangling. */
257 return NULL;
258 }
259 #endif
260
261 /* Parse target option strings. */
262
263 static void
264 alpha_option_override (void)
265 {
266 static const struct cpu_table {
267 const char *const name;
268 const enum processor_type processor;
269 const int flags;
270 } cpu_table[] = {
271 { "ev4", PROCESSOR_EV4, 0 },
272 { "ev45", PROCESSOR_EV4, 0 },
273 { "21064", PROCESSOR_EV4, 0 },
274 { "ev5", PROCESSOR_EV5, 0 },
275 { "21164", PROCESSOR_EV5, 0 },
276 { "ev56", PROCESSOR_EV5, MASK_BWX },
277 { "21164a", PROCESSOR_EV5, MASK_BWX },
278 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
279 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
280 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
281 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
282 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
283 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
284 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
285 };
286
287 int const ct_size = ARRAY_SIZE (cpu_table);
288 int i;
289
290 #ifdef SUBTARGET_OVERRIDE_OPTIONS
291 SUBTARGET_OVERRIDE_OPTIONS;
292 #endif
293
294 alpha_fprm = ALPHA_FPRM_NORM;
295 alpha_tp = ALPHA_TP_PROG;
296 alpha_fptm = ALPHA_FPTM_N;
297
298 if (TARGET_IEEE)
299 {
300 alpha_tp = ALPHA_TP_INSN;
301 alpha_fptm = ALPHA_FPTM_SU;
302 }
303 if (TARGET_IEEE_WITH_INEXACT)
304 {
305 alpha_tp = ALPHA_TP_INSN;
306 alpha_fptm = ALPHA_FPTM_SUI;
307 }
308
309 if (alpha_tp_string)
310 {
311 if (! strcmp (alpha_tp_string, "p"))
312 alpha_tp = ALPHA_TP_PROG;
313 else if (! strcmp (alpha_tp_string, "f"))
314 alpha_tp = ALPHA_TP_FUNC;
315 else if (! strcmp (alpha_tp_string, "i"))
316 alpha_tp = ALPHA_TP_INSN;
317 else
318 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
319 }
320
321 if (alpha_fprm_string)
322 {
323 if (! strcmp (alpha_fprm_string, "n"))
324 alpha_fprm = ALPHA_FPRM_NORM;
325 else if (! strcmp (alpha_fprm_string, "m"))
326 alpha_fprm = ALPHA_FPRM_MINF;
327 else if (! strcmp (alpha_fprm_string, "c"))
328 alpha_fprm = ALPHA_FPRM_CHOP;
329 else if (! strcmp (alpha_fprm_string,"d"))
330 alpha_fprm = ALPHA_FPRM_DYN;
331 else
332 error ("bad value %qs for -mfp-rounding-mode switch",
333 alpha_fprm_string);
334 }
335
336 if (alpha_fptm_string)
337 {
338 if (strcmp (alpha_fptm_string, "n") == 0)
339 alpha_fptm = ALPHA_FPTM_N;
340 else if (strcmp (alpha_fptm_string, "u") == 0)
341 alpha_fptm = ALPHA_FPTM_U;
342 else if (strcmp (alpha_fptm_string, "su") == 0)
343 alpha_fptm = ALPHA_FPTM_SU;
344 else if (strcmp (alpha_fptm_string, "sui") == 0)
345 alpha_fptm = ALPHA_FPTM_SUI;
346 else
347 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
348 }
349
350 if (alpha_cpu_string)
351 {
352 for (i = 0; i < ct_size; i++)
353 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
354 {
355 alpha_tune = alpha_cpu = cpu_table [i].processor;
356 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
357 target_flags |= cpu_table [i].flags;
358 break;
359 }
360 if (i == ct_size)
361 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
362 }
363
364 if (alpha_tune_string)
365 {
366 for (i = 0; i < ct_size; i++)
367 if (! strcmp (alpha_tune_string, cpu_table [i].name))
368 {
369 alpha_tune = cpu_table [i].processor;
370 break;
371 }
372 if (i == ct_size)
373 error ("bad value %qs for -mtune switch", alpha_tune_string);
374 }
375
376 /* Do some sanity checks on the above options. */
377
378 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
379 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
380 {
381 warning (0, "fp software completion requires -mtrap-precision=i");
382 alpha_tp = ALPHA_TP_INSN;
383 }
384
385 if (alpha_cpu == PROCESSOR_EV6)
386 {
387 /* Except for EV6 pass 1 (not released), we always have precise
388 arithmetic traps, which means we can do software completion
389 without minding trap shadows. */
390 alpha_tp = ALPHA_TP_PROG;
391 }
392
393 if (TARGET_FLOAT_VAX)
394 {
395 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
396 {
397 warning (0, "rounding mode not supported for VAX floats");
398 alpha_fprm = ALPHA_FPRM_NORM;
399 }
400 if (alpha_fptm == ALPHA_FPTM_SUI)
401 {
402 warning (0, "trap mode not supported for VAX floats");
403 alpha_fptm = ALPHA_FPTM_SU;
404 }
405 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
406 warning (0, "128-bit long double not supported for VAX floats");
407 target_flags &= ~MASK_LONG_DOUBLE_128;
408 }
409
410 {
411 char *end;
412 int lat;
413
414 if (!alpha_mlat_string)
415 alpha_mlat_string = "L1";
416
417 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
418 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
419 ;
420 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
421 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
422 && alpha_mlat_string[2] == '\0')
423 {
424 static int const cache_latency[][4] =
425 {
426 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
427 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
428 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
429 };
430
431 lat = alpha_mlat_string[1] - '0';
432 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
433 {
434 warning (0, "L%d cache latency unknown for %s",
435 lat, alpha_cpu_name[alpha_tune]);
436 lat = 3;
437 }
438 else
439 lat = cache_latency[alpha_tune][lat-1];
440 }
441 else if (! strcmp (alpha_mlat_string, "main"))
442 {
443 /* Most current memories have about 370ns latency. This is
444 a reasonable guess for a fast cpu. */
445 lat = 150;
446 }
447 else
448 {
449 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
450 lat = 3;
451 }
452
453 alpha_memory_latency = lat;
454 }
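/* For example (illustrative only): with ev6 tuning, -mmemory-latency=L2
   picks 12 from the table above, -mmemory-latency=main picks 150, and an
   unparsable string falls back to the default latency of 3.  */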
455
456 /* Default the definition of "small data" to 8 bytes. */
457 if (!global_options_set.x_g_switch_value)
458 g_switch_value = 8;
459
460 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
461 if (flag_pic == 1)
462 target_flags |= MASK_SMALL_DATA;
463 else if (flag_pic == 2)
464 target_flags &= ~MASK_SMALL_DATA;
465
466 /* Align labels and loops for optimal branching. */
467 /* ??? Kludge these by not doing anything if we don't optimize and also if
468 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
469 if (optimize > 0 && write_symbols != SDB_DEBUG)
470 {
471 if (align_loops <= 0)
472 align_loops = 16;
473 if (align_jumps <= 0)
474 align_jumps = 16;
475 }
476 if (align_functions <= 0)
477 align_functions = 16;
478
479 /* Acquire a unique set number for our register saves and restores. */
480 alpha_sr_alias_set = new_alias_set ();
481
482 /* Register variables and functions with the garbage collector. */
483
484 /* Set up function hooks. */
485 init_machine_status = alpha_init_machine_status;
486
487 /* Tell the compiler when we're using VAX floating point. */
488 if (TARGET_FLOAT_VAX)
489 {
490 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
491 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
492 REAL_MODE_FORMAT (TFmode) = NULL;
493 }
494
495 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
496 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
497 target_flags |= MASK_LONG_DOUBLE_128;
498 #endif
499 }
500 \f
501 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
502
503 int
504 zap_mask (HOST_WIDE_INT value)
505 {
506 int i;
507
508 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
509 i++, value >>= 8)
510 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
511 return 0;
512
513 return 1;
514 }
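/* For instance, 0xffff00000000ffff is a valid zap mask (every byte is
   0x00 or 0xff), while 0x00000000000000f0 is not, since its low byte is
   only partially set.  */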
515
516 /* Return true if OP is valid for a particular TLS relocation.
517 We are already guaranteed that OP is a CONST. */
518
519 int
520 tls_symbolic_operand_1 (rtx op, int size, int unspec)
521 {
522 op = XEXP (op, 0);
523
524 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
525 return 0;
526 op = XVECEXP (op, 0, 0);
527
528 if (GET_CODE (op) != SYMBOL_REF)
529 return 0;
530
531 switch (SYMBOL_REF_TLS_MODEL (op))
532 {
533 case TLS_MODEL_LOCAL_DYNAMIC:
534 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
535 case TLS_MODEL_INITIAL_EXEC:
536 return unspec == UNSPEC_TPREL && size == 64;
537 case TLS_MODEL_LOCAL_EXEC:
538 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
539 default:
540 gcc_unreachable ();
541 }
542 }
543
544 /* Used by aligned_memory_operand and unaligned_memory_operand to
545 resolve what reload is going to do with OP if it's a register. */
546
547 rtx
548 resolve_reload_operand (rtx op)
549 {
550 if (reload_in_progress)
551 {
552 rtx tmp = op;
553 if (GET_CODE (tmp) == SUBREG)
554 tmp = SUBREG_REG (tmp);
555 if (REG_P (tmp)
556 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
557 {
558 op = reg_equiv_memory_loc[REGNO (tmp)];
559 if (op == 0)
560 return 0;
561 }
562 }
563 return op;
564 }
565
566 /* The set of scalar modes supported differs from the default
567 check-what-c-supports version in that TFmode is sometimes available
568 even when long double indicates only DFmode. */
569
570 static bool
571 alpha_scalar_mode_supported_p (enum machine_mode mode)
572 {
573 switch (mode)
574 {
575 case QImode:
576 case HImode:
577 case SImode:
578 case DImode:
579 case TImode: /* via optabs.c */
580 return true;
581
582 case SFmode:
583 case DFmode:
584 return true;
585
586 case TFmode:
587 return TARGET_HAS_XFLOATING_LIBS;
588
589 default:
590 return false;
591 }
592 }
593
594 /* Alpha implements a couple of integer vector mode operations when
595 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
596 which allows the vectorizer to operate on e.g. move instructions,
597 or when expand_vector_operations can do something useful. */
598
599 static bool
600 alpha_vector_mode_supported_p (enum machine_mode mode)
601 {
602 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
603 }
604
605 /* Return 1 if this function can directly return via $26. */
606
607 int
608 direct_return (void)
609 {
610 return (TARGET_ABI_OSF
611 && reload_completed
612 && alpha_sa_size () == 0
613 && get_frame_size () == 0
614 && crtl->outgoing_args_size == 0
615 && crtl->args.pretend_args_size == 0);
616 }
617
618 /* Return the ADDR_VEC associated with a tablejump insn. */
619
620 rtx
621 alpha_tablejump_addr_vec (rtx insn)
622 {
623 rtx tmp;
624
625 tmp = JUMP_LABEL (insn);
626 if (!tmp)
627 return NULL_RTX;
628 tmp = NEXT_INSN (tmp);
629 if (!tmp)
630 return NULL_RTX;
631 if (JUMP_P (tmp)
632 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
633 return PATTERN (tmp);
634 return NULL_RTX;
635 }
636
637 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
638
639 rtx
640 alpha_tablejump_best_label (rtx insn)
641 {
642 rtx jump_table = alpha_tablejump_addr_vec (insn);
643 rtx best_label = NULL_RTX;
644
645 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
646 there for edge frequency counts from profile data. */
647
648 if (jump_table)
649 {
650 int n_labels = XVECLEN (jump_table, 1);
651 int best_count = -1;
652 int i, j;
653
654 for (i = 0; i < n_labels; i++)
655 {
656 int count = 1;
657
658 for (j = i + 1; j < n_labels; j++)
659 if (XEXP (XVECEXP (jump_table, 1, i), 0)
660 == XEXP (XVECEXP (jump_table, 1, j), 0))
661 count++;
662
663 if (count > best_count)
664 best_count = count, best_label = XVECEXP (jump_table, 1, i);
665 }
666 }
667
668 return best_label ? best_label : const0_rtx;
669 }
670
671 /* Return the TLS model to use for SYMBOL. */
672
673 static enum tls_model
674 tls_symbolic_operand_type (rtx symbol)
675 {
676 enum tls_model model;
677
678 if (GET_CODE (symbol) != SYMBOL_REF)
679 return TLS_MODEL_NONE;
680 model = SYMBOL_REF_TLS_MODEL (symbol);
681
682 /* Local-exec with a 64-bit size is the same code as initial-exec. */
683 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
684 model = TLS_MODEL_INITIAL_EXEC;
685
686 return model;
687 }
688 \f
689 /* Return true if the function DECL will share the same GP as any
690 function in the current unit of translation. */
691
692 static bool
693 decl_has_samegp (const_tree decl)
694 {
695 /* Functions that are not local can be overridden, and thus may
696 not share the same gp. */
697 if (!(*targetm.binds_local_p) (decl))
698 return false;
699
700 /* If -msmall-data is in effect, assume that there is only one GP
701 for the module, and so any local symbol has this property. We
702 need explicit relocations to be able to enforce this for symbols
703 not defined in this unit of translation, however. */
704 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
705 return true;
706
707 /* Functions that are not external are defined in this UoT. */
708 /* ??? Irritatingly, static functions not yet emitted are still
709 marked "external". Apply this to non-static functions only. */
710 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
711 }
712
713 /* Return true if EXP should be placed in the small data section. */
714
715 static bool
716 alpha_in_small_data_p (const_tree exp)
717 {
718 /* We want to merge strings, so we never consider them small data. */
719 if (TREE_CODE (exp) == STRING_CST)
720 return false;
721
722 /* Functions are never in the small data area. Duh. */
723 if (TREE_CODE (exp) == FUNCTION_DECL)
724 return false;
725
726 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
727 {
728 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
729 if (strcmp (section, ".sdata") == 0
730 || strcmp (section, ".sbss") == 0)
731 return true;
732 }
733 else
734 {
735 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
736
737 /* If this is an incomplete type with size 0, then we can't put it
738 in sdata because it might be too big when completed. */
739 if (size > 0 && size <= g_switch_value)
740 return true;
741 }
742
743 return false;
744 }
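/* As an example: with the default -G 8 (g_switch_value, set up in
   alpha_option_override above), a 4-byte scalar variable with no section
   attribute qualifies as small data, while a 16-byte structure does not.  */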
745
746 #if TARGET_ABI_OPEN_VMS
747 static bool
748 vms_valid_pointer_mode (enum machine_mode mode)
749 {
750 return (mode == SImode || mode == DImode);
751 }
752
753 static bool
754 alpha_linkage_symbol_p (const char *symname)
755 {
756 int symlen = strlen (symname);
757
758 if (symlen > 4)
759 return strcmp (&symname [symlen - 4], "..lk") == 0;
760
761 return false;
762 }
763
764 #define LINKAGE_SYMBOL_REF_P(X) \
765 ((GET_CODE (X) == SYMBOL_REF \
766 && alpha_linkage_symbol_p (XSTR (X, 0))) \
767 || (GET_CODE (X) == CONST \
768 && GET_CODE (XEXP (X, 0)) == PLUS \
769 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
770 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
771 #endif
772
773 /* legitimate_address_p recognizes an RTL expression that is a valid
774 memory address for an instruction. The MODE argument is the
775 machine mode for the MEM expression that wants to use this address.
776
777 For Alpha, we have either a constant address or the sum of a
778 register and a constant address, or just a register. For DImode,
779 any of those forms can be surrounded with an AND that clears the
780 low-order three bits; this is an "unaligned" access. */
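/* A few illustrative examples of the forms accepted below:
   (reg), (plus (reg) (const_int 64)), and, for DImode only,
   (and (plus (reg) (const_int 5)) (const_int -8)) as used by ldq_u.  */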
781
782 static bool
783 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
784 {
785 /* If this is an ldq_u type address, discard the outer AND. */
786 if (mode == DImode
787 && GET_CODE (x) == AND
788 && CONST_INT_P (XEXP (x, 1))
789 && INTVAL (XEXP (x, 1)) == -8)
790 x = XEXP (x, 0);
791
792 /* Discard non-paradoxical subregs. */
793 if (GET_CODE (x) == SUBREG
794 && (GET_MODE_SIZE (GET_MODE (x))
795 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
796 x = SUBREG_REG (x);
797
798 /* Unadorned general registers are valid. */
799 if (REG_P (x)
800 && (strict
801 ? STRICT_REG_OK_FOR_BASE_P (x)
802 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
803 return true;
804
805 /* Constant addresses (i.e. +/- 32k) are valid. */
806 if (CONSTANT_ADDRESS_P (x))
807 return true;
808
809 #if TARGET_ABI_OPEN_VMS
810 if (LINKAGE_SYMBOL_REF_P (x))
811 return true;
812 #endif
813
814 /* Register plus a small constant offset is valid. */
815 if (GET_CODE (x) == PLUS)
816 {
817 rtx ofs = XEXP (x, 1);
818 x = XEXP (x, 0);
819
820 /* Discard non-paradoxical subregs. */
821 if (GET_CODE (x) == SUBREG
822 && (GET_MODE_SIZE (GET_MODE (x))
823 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
824 x = SUBREG_REG (x);
825
826 if (REG_P (x))
827 {
828 if (! strict
829 && NONSTRICT_REG_OK_FP_BASE_P (x)
830 && CONST_INT_P (ofs))
831 return true;
832 if ((strict
833 ? STRICT_REG_OK_FOR_BASE_P (x)
834 : NONSTRICT_REG_OK_FOR_BASE_P (x))
835 && CONSTANT_ADDRESS_P (ofs))
836 return true;
837 }
838 }
839
840 /* If we're managing explicit relocations, LO_SUM is valid, as are small
841 data symbols. Avoid explicit relocations of modes larger than word
842 mode, since e.g. $LC0+8($1) can fold around a +/- 32k offset. */
843 else if (TARGET_EXPLICIT_RELOCS
844 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
845 {
846 if (small_symbolic_operand (x, Pmode))
847 return true;
848
849 if (GET_CODE (x) == LO_SUM)
850 {
851 rtx ofs = XEXP (x, 1);
852 x = XEXP (x, 0);
853
854 /* Discard non-paradoxical subregs. */
855 if (GET_CODE (x) == SUBREG
856 && (GET_MODE_SIZE (GET_MODE (x))
857 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
858 x = SUBREG_REG (x);
859
860 /* Must have a valid base register. */
861 if (! (REG_P (x)
862 && (strict
863 ? STRICT_REG_OK_FOR_BASE_P (x)
864 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
865 return false;
866
867 /* The symbol must be local. */
868 if (local_symbolic_operand (ofs, Pmode)
869 || dtp32_symbolic_operand (ofs, Pmode)
870 || tp32_symbolic_operand (ofs, Pmode))
871 return true;
872 }
873 }
874
875 return false;
876 }
877
878 /* Build the SYMBOL_REF for __tls_get_addr. */
879
880 static GTY(()) rtx tls_get_addr_libfunc;
881
882 static rtx
883 get_tls_get_addr (void)
884 {
885 if (!tls_get_addr_libfunc)
886 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
887 return tls_get_addr_libfunc;
888 }
889
890 /* Try machine-dependent ways of modifying an illegitimate address
891 to be legitimate. If we find one, return the new, valid address. */
892
893 static rtx
894 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
895 {
896 HOST_WIDE_INT addend;
897
898 /* If the address is (plus reg const_int) and the CONST_INT is not a
899 valid offset, compute the high part of the constant and add it to
900 the register. Then our address is (plus temp low-part-const). */
901 if (GET_CODE (x) == PLUS
902 && REG_P (XEXP (x, 0))
903 && CONST_INT_P (XEXP (x, 1))
904 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
905 {
906 addend = INTVAL (XEXP (x, 1));
907 x = XEXP (x, 0);
908 goto split_addend;
909 }
910
911 /* If the address is (const (plus FOO const_int)), find the low-order
912 part of the CONST_INT. Then load FOO plus any high-order part of the
913 CONST_INT into a register. Our address is (plus reg low-part-const).
914 This is done to reduce the number of GOT entries. */
915 if (can_create_pseudo_p ()
916 && GET_CODE (x) == CONST
917 && GET_CODE (XEXP (x, 0)) == PLUS
918 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
919 {
920 addend = INTVAL (XEXP (XEXP (x, 0), 1));
921 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
922 goto split_addend;
923 }
924
925 /* If we have a (plus reg const), emit the load as in (2), then add
926 the two registers, and finally generate (plus reg low-part-const) as
927 our address. */
928 if (can_create_pseudo_p ()
929 && GET_CODE (x) == PLUS
930 && REG_P (XEXP (x, 0))
931 && GET_CODE (XEXP (x, 1)) == CONST
932 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
933 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
934 {
935 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
936 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
937 XEXP (XEXP (XEXP (x, 1), 0), 0),
938 NULL_RTX, 1, OPTAB_LIB_WIDEN);
939 goto split_addend;
940 }
941
942 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
943 Avoid modes larger than word mode, since e.g. $LC0+8($1) can fold
944 around a +/- 32k offset. */
945 if (TARGET_EXPLICIT_RELOCS
946 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
947 && symbolic_operand (x, Pmode))
948 {
949 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
950
951 switch (tls_symbolic_operand_type (x))
952 {
953 case TLS_MODEL_NONE:
954 break;
955
956 case TLS_MODEL_GLOBAL_DYNAMIC:
957 start_sequence ();
958
959 r0 = gen_rtx_REG (Pmode, 0);
960 r16 = gen_rtx_REG (Pmode, 16);
961 tga = get_tls_get_addr ();
962 dest = gen_reg_rtx (Pmode);
963 seq = GEN_INT (alpha_next_sequence_number++);
964
965 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
966 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
967 insn = emit_call_insn (insn);
968 RTL_CONST_CALL_P (insn) = 1;
969 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
970
971 insn = get_insns ();
972 end_sequence ();
973
974 emit_libcall_block (insn, dest, r0, x);
975 return dest;
976
977 case TLS_MODEL_LOCAL_DYNAMIC:
978 start_sequence ();
979
980 r0 = gen_rtx_REG (Pmode, 0);
981 r16 = gen_rtx_REG (Pmode, 16);
982 tga = get_tls_get_addr ();
983 scratch = gen_reg_rtx (Pmode);
984 seq = GEN_INT (alpha_next_sequence_number++);
985
986 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
987 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
988 insn = emit_call_insn (insn);
989 RTL_CONST_CALL_P (insn) = 1;
990 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
991
992 insn = get_insns ();
993 end_sequence ();
994
995 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
996 UNSPEC_TLSLDM_CALL);
997 emit_libcall_block (insn, scratch, r0, eqv);
998
999 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1000 eqv = gen_rtx_CONST (Pmode, eqv);
1001
1002 if (alpha_tls_size == 64)
1003 {
1004 dest = gen_reg_rtx (Pmode);
1005 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1006 emit_insn (gen_adddi3 (dest, dest, scratch));
1007 return dest;
1008 }
1009 if (alpha_tls_size == 32)
1010 {
1011 insn = gen_rtx_HIGH (Pmode, eqv);
1012 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1013 scratch = gen_reg_rtx (Pmode);
1014 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1015 }
1016 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1017
1018 case TLS_MODEL_INITIAL_EXEC:
1019 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1020 eqv = gen_rtx_CONST (Pmode, eqv);
1021 tp = gen_reg_rtx (Pmode);
1022 scratch = gen_reg_rtx (Pmode);
1023 dest = gen_reg_rtx (Pmode);
1024
1025 emit_insn (gen_load_tp (tp));
1026 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1027 emit_insn (gen_adddi3 (dest, tp, scratch));
1028 return dest;
1029
1030 case TLS_MODEL_LOCAL_EXEC:
1031 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1032 eqv = gen_rtx_CONST (Pmode, eqv);
1033 tp = gen_reg_rtx (Pmode);
1034
1035 emit_insn (gen_load_tp (tp));
1036 if (alpha_tls_size == 32)
1037 {
1038 insn = gen_rtx_HIGH (Pmode, eqv);
1039 insn = gen_rtx_PLUS (Pmode, tp, insn);
1040 tp = gen_reg_rtx (Pmode);
1041 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1042 }
1043 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1044
1045 default:
1046 gcc_unreachable ();
1047 }
1048
1049 if (local_symbolic_operand (x, Pmode))
1050 {
1051 if (small_symbolic_operand (x, Pmode))
1052 return x;
1053 else
1054 {
1055 if (can_create_pseudo_p ())
1056 scratch = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1058 gen_rtx_HIGH (Pmode, x)));
1059 return gen_rtx_LO_SUM (Pmode, scratch, x);
1060 }
1061 }
1062 }
1063
1064 return NULL;
1065
1066 split_addend:
1067 {
1068 HOST_WIDE_INT low, high;
1069
1070 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1071 addend -= low;
1072 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1073 addend -= high;
1074
1075 if (addend)
1076 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1077 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1078 1, OPTAB_LIB_WIDEN);
1079 if (high)
1080 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1081 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1082 1, OPTAB_LIB_WIDEN);
1083
1084 return plus_constant (x, low);
1085 }
1086 }
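/* A worked example of the split_addend arithmetic above: an addend of
   0x2468acf0 splits into low = -0x5310 and high = 0x24690000, so the
   address is rebuilt as ((reg + 0x24690000) + -0x5310), i.e. one ldah
   of 0x2469 plus an lda displacement of -0x5310.  */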
1087
1088
1089 /* Try machine-dependent ways of modifying an illegitimate address
1090 to be legitimate. Return X or the new, valid address. */
1091
1092 static rtx
1093 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1094 enum machine_mode mode)
1095 {
1096 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1097 return new_x ? new_x : x;
1098 }
1099
1100 /* Primarily this is required for TLS symbols, but given that our move
1101 patterns *ought* to be able to handle any symbol at any time, we
1102 should never be spilling symbolic operands to the constant pool, ever. */
1103
1104 static bool
1105 alpha_cannot_force_const_mem (rtx x)
1106 {
1107 enum rtx_code code = GET_CODE (x);
1108 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1109 }
1110
1111 /* We do not allow indirect calls to be optimized into sibling calls, nor
1112 can we allow a call to a function with a different GP to be optimized
1113 into a sibcall. */
1114
1115 static bool
1116 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1117 {
1118 /* Can't do indirect tail calls, since we don't know if the target
1119 uses the same GP. */
1120 if (!decl)
1121 return false;
1122
1123 /* Otherwise, we can make a tail call if the target function shares
1124 the same GP. */
1125 return decl_has_samegp (decl);
1126 }
1127
1128 int
1129 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1130 {
1131 rtx x = *px;
1132
1133 /* Don't re-split. */
1134 if (GET_CODE (x) == LO_SUM)
1135 return -1;
1136
1137 return small_symbolic_operand (x, Pmode) != 0;
1138 }
1139
1140 static int
1141 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1142 {
1143 rtx x = *px;
1144
1145 /* Don't re-split. */
1146 if (GET_CODE (x) == LO_SUM)
1147 return -1;
1148
1149 if (small_symbolic_operand (x, Pmode))
1150 {
1151 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1152 *px = x;
1153 return -1;
1154 }
1155
1156 return 0;
1157 }
1158
1159 rtx
1160 split_small_symbolic_operand (rtx x)
1161 {
1162 x = copy_insn (x);
1163 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1164 return x;
1165 }
1166
1167 /* Indicate that INSN cannot be duplicated. This is true for any insn
1168 that we've marked with gpdisp relocs, since those have to stay in
1169 1-1 correspondence with one another.
1170
1171 Technically we could copy them if we could set up a mapping from one
1172 sequence number to another, across the set of insns to be duplicated.
1173 This seems overly complicated and error-prone since interblock motion
1174 from sched-ebb could move one of the pair of insns to a different block.
1175
1176 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1177 then they'll be in a different block from their ldgp. Which could lead
1178 the bb reorder code to think that it would be ok to copy just the block
1179 containing the call and branch to the block containing the ldgp. */
1180
1181 static bool
1182 alpha_cannot_copy_insn_p (rtx insn)
1183 {
1184 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1185 return false;
1186 if (recog_memoized (insn) >= 0)
1187 return get_attr_cannot_copy (insn);
1188 else
1189 return false;
1190 }
1191
1192
1193 /* Try a machine-dependent way of reloading an illegitimate address
1194 operand. If we find one, push the reload and return the new rtx. */
1195
1196 rtx
1197 alpha_legitimize_reload_address (rtx x,
1198 enum machine_mode mode ATTRIBUTE_UNUSED,
1199 int opnum, int type,
1200 int ind_levels ATTRIBUTE_UNUSED)
1201 {
1202 /* We must recognize output that we have already generated ourselves. */
1203 if (GET_CODE (x) == PLUS
1204 && GET_CODE (XEXP (x, 0)) == PLUS
1205 && REG_P (XEXP (XEXP (x, 0), 0))
1206 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1207 && CONST_INT_P (XEXP (x, 1)))
1208 {
1209 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1210 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1211 opnum, (enum reload_type) type);
1212 return x;
1213 }
1214
1215 /* We wish to handle large displacements off a base register by
1216 splitting the addend across an ldah and the mem insn. This
1217 cuts the number of extra insns needed from 3 to 1. */
1218 if (GET_CODE (x) == PLUS
1219 && REG_P (XEXP (x, 0))
1220 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1221 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1222 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1223 {
1224 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1225 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1226 HOST_WIDE_INT high
1227 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1228
1229 /* Check for 32-bit overflow. */
1230 if (high + low != val)
1231 return NULL_RTX;
1232
1233 /* Reload the high part into a base reg; leave the low part
1234 in the mem directly. */
1235 x = gen_rtx_PLUS (GET_MODE (x),
1236 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1237 GEN_INT (high)),
1238 GEN_INT (low));
1239
1240 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1241 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1242 opnum, (enum reload_type) type);
1243 return x;
1244 }
1245
1246 return NULL_RTX;
1247 }
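/* For instance (an editorial sketch, not required output): a reload of
   a mem at base + 0x12345 splits into high = 0x10000 and low = 0x2345;
   the 0x10000 is added to the base with a single ldah and the mem keeps
   the 0x2345 displacement, rather than materializing the full constant
   separately.  */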
1248 \f
1249 /* Compute a (partial) cost for rtx X. Return true if the complete
1250 cost has been computed, and false if subexpressions should be
1251 scanned. In either case, *TOTAL contains the cost result. */
1252
1253 static bool
1254 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1255 bool speed)
1256 {
1257 enum machine_mode mode = GET_MODE (x);
1258 bool float_mode_p = FLOAT_MODE_P (mode);
1259 const struct alpha_rtx_cost_data *cost_data;
1260
1261 if (!speed)
1262 cost_data = &alpha_rtx_cost_size;
1263 else
1264 cost_data = &alpha_rtx_cost_data[alpha_tune];
1265
1266 switch (code)
1267 {
1268 case CONST_INT:
1269 /* If this is an 8-bit constant, return zero since it can be used
1270 nearly anywhere with no cost. If it is a valid operand for an
1271 ADD or AND, likewise return 0 if we know it will be used in that
1272 context. Otherwise, return 2 since it might be used there later.
1273 All other constants take at least two insns. */
1274 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1275 {
1276 *total = 0;
1277 return true;
1278 }
1279 /* FALLTHRU */
1280
1281 case CONST_DOUBLE:
1282 if (x == CONST0_RTX (mode))
1283 *total = 0;
1284 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1285 || (outer_code == AND && and_operand (x, VOIDmode)))
1286 *total = 0;
1287 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1288 *total = 2;
1289 else
1290 *total = COSTS_N_INSNS (2);
1291 return true;
1292
1293 case CONST:
1294 case SYMBOL_REF:
1295 case LABEL_REF:
1296 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1297 *total = COSTS_N_INSNS (outer_code != MEM);
1298 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1299 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1300 else if (tls_symbolic_operand_type (x))
1301 /* Estimate of cost for call_pal rduniq. */
1302 /* ??? How many insns do we emit here? More than one... */
1303 *total = COSTS_N_INSNS (15);
1304 else
1305 /* Otherwise we do a load from the GOT. */
1306 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1307 return true;
1308
1309 case HIGH:
1310 /* This is effectively an add_operand. */
1311 *total = 2;
1312 return true;
1313
1314 case PLUS:
1315 case MINUS:
1316 if (float_mode_p)
1317 *total = cost_data->fp_add;
1318 else if (GET_CODE (XEXP (x, 0)) == MULT
1319 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1320 {
1321 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1322 (enum rtx_code) outer_code, speed)
1323 + rtx_cost (XEXP (x, 1),
1324 (enum rtx_code) outer_code, speed)
1325 + COSTS_N_INSNS (1));
1326 return true;
1327 }
1328 return false;
1329
1330 case MULT:
1331 if (float_mode_p)
1332 *total = cost_data->fp_mult;
1333 else if (mode == DImode)
1334 *total = cost_data->int_mult_di;
1335 else
1336 *total = cost_data->int_mult_si;
1337 return false;
1338
1339 case ASHIFT:
1340 if (CONST_INT_P (XEXP (x, 1))
1341 && INTVAL (XEXP (x, 1)) <= 3)
1342 {
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1345 }
1346 /* FALLTHRU */
1347
1348 case ASHIFTRT:
1349 case LSHIFTRT:
1350 *total = cost_data->int_shift;
1351 return false;
1352
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
1355 *total = cost_data->fp_add;
1356 else
1357 *total = cost_data->int_cmov;
1358 return false;
1359
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
1365 *total = cost_data->int_div;
1366 else if (mode == SFmode)
1367 *total = cost_data->fp_div_sf;
1368 else
1369 *total = cost_data->fp_div_df;
1370 return false;
1371
1372 case MEM:
1373 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1374 return true;
1375
1376 case NEG:
1377 if (! float_mode_p)
1378 {
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1381 }
1382 /* FALLTHRU */
1383
1384 case ABS:
1385 if (! float_mode_p)
1386 {
1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1388 return false;
1389 }
1390 /* FALLTHRU */
1391
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
1396 case FLOAT_TRUNCATE:
1397 *total = cost_data->fp_add;
1398 return false;
1399
1400 case FLOAT_EXTEND:
1401 if (MEM_P (XEXP (x, 0)))
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1406
1407 default:
1408 return false;
1409 }
1410 }
1411 \f
1412 /* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
1414 *PBITNUM. SCRATCH is a free register for use in reloading out
1415 of range stack slots. */
1416
1417 void
1418 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1419 {
1420 rtx base;
1421 HOST_WIDE_INT disp, offset;
1422
1423 gcc_assert (MEM_P (ref));
1424
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1427 {
1428 base = find_replacement (&XEXP (ref, 0));
1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
1430 }
1431 else
1432 base = XEXP (ref, 0);
1433
1434 if (GET_CODE (base) == PLUS)
1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1438
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
1447
1448 /* The location should not cross an aligned word boundary. */
1449 gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
1450 <= GET_MODE_SIZE (SImode));
1451
1452 /* Access the entire aligned word. */
1453 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1454
1455 /* Convert the byte offset within the word to a bit offset. */
1456 offset *= BITS_PER_UNIT;
1457 *pbitnum = GEN_INT (offset);
1458 }
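/* Illustration of the above: for a QImode reference at (plus (reg) 6)
   with no alignment information, disp = 6 and offset = 2, so
   *PALIGNED_MEM covers the SImode word at (plus (reg) 4) and *PBITNUM
   is 16.  */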
1459
1460 /* Similar, but just get the address.  Handle the two reload cases
1461 as in get_aligned_mem above. */
1462
1463 rtx
1464 get_unaligned_address (rtx ref)
1465 {
1466 rtx base;
1467 HOST_WIDE_INT offset = 0;
1468
1469 gcc_assert (MEM_P (ref));
1470
1471 if (reload_in_progress
1472 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1473 {
1474 base = find_replacement (&XEXP (ref, 0));
1475
1476 gcc_assert (memory_address_p (GET_MODE (ref), base));
1477 }
1478 else
1479 base = XEXP (ref, 0);
1480
1481 if (GET_CODE (base) == PLUS)
1482 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1483
1484 return plus_constant (base, offset);
1485 }
1486
1487 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1488 X is always returned in a register. */
1489
1490 rtx
1491 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1492 {
1493 if (GET_CODE (addr) == PLUS)
1494 {
1495 ofs += INTVAL (XEXP (addr, 1));
1496 addr = XEXP (addr, 0);
1497 }
1498
1499 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1500 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1501 }
1502
1503 /* On the Alpha, all (non-symbolic) constants except zero go into
1504 a floating-point register via memory. Note that we cannot
1505 return anything that is not a subset of RCLASS, and that some
1506 symbolic constants cannot be dropped to memory. */
1507
1508 enum reg_class
1509 alpha_preferred_reload_class (rtx x, enum reg_class rclass)
1510 {
1511 /* Zero is present in any register class. */
1512 if (x == CONST0_RTX (GET_MODE (x)))
1513 return rclass;
1514
1515 /* These sorts of constants we can easily drop to memory. */
1516 if (CONST_INT_P (x)
1517 || GET_CODE (x) == CONST_DOUBLE
1518 || GET_CODE (x) == CONST_VECTOR)
1519 {
1520 if (rclass == FLOAT_REGS)
1521 return NO_REGS;
1522 if (rclass == ALL_REGS)
1523 return GENERAL_REGS;
1524 return rclass;
1525 }
1526
1527 /* All other kinds of constants should not (and in the case of HIGH
1528 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1529 secondary reload. */
1530 if (CONSTANT_P (x))
1531 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1532
1533 return rclass;
1534 }
1535
1536 /* Inform reload about cases where moving X with a mode MODE to a register in
1537 RCLASS requires an extra scratch or immediate register. Return the class
1538 needed for the immediate register. */
1539
1540 static reg_class_t
1541 alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1542 enum machine_mode mode, secondary_reload_info *sri)
1543 {
1544 enum reg_class rclass = (enum reg_class) rclass_i;
1545
1546 /* Loading and storing HImode or QImode values to and from memory
1547 usually requires a scratch register. */
1548 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1549 {
1550 if (any_memory_operand (x, mode))
1551 {
1552 if (in_p)
1553 {
1554 if (!aligned_memory_operand (x, mode))
1555 sri->icode = direct_optab_handler (reload_in_optab, mode);
1556 }
1557 else
1558 sri->icode = direct_optab_handler (reload_out_optab, mode);
1559 return NO_REGS;
1560 }
1561 }
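/* For example, without BWX a QImode load from memory that is not known
   to be aligned cannot be done directly; the code above steers reload to
   the corresponding reload_in pattern, which performs the access with an
   unaligned ldq_u-based sequence and a scratch register.  */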
1562
1563 /* We also cannot do integral arithmetic into FP regs, as might result
1564 from register elimination into a DImode fp register. */
1565 if (rclass == FLOAT_REGS)
1566 {
1567 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1568 return GENERAL_REGS;
1569 if (in_p && INTEGRAL_MODE_P (mode)
1570 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1571 return GENERAL_REGS;
1572 }
1573
1574 return NO_REGS;
1575 }
1576 \f
1577 /* Subfunction of the following function. Update the flags of any MEM
1578 found in part of X. */
1579
1580 static int
1581 alpha_set_memflags_1 (rtx *xp, void *data)
1582 {
1583 rtx x = *xp, orig = (rtx) data;
1584
1585 if (!MEM_P (x))
1586 return 0;
1587
1588 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1589 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1590 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1591 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1592 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1593
1594 /* Sadly, we cannot use alias sets because the extra aliasing
1595 produced by the AND interferes. Given that two-byte quantities
1596 are the only thing we would be able to differentiate anyway,
1597 there does not seem to be any point in convoluting the early
1598 out of the alias check. */
1599
1600 return -1;
1601 }
1602
1603 /* Given SEQ, which is an INSN list, look for any MEMs in either
1604 a SET_DEST or a SET_SRC and copy the volatile, in-struct, scalar,
1605 notrap, and read-only flags from REF into each MEM found. If REF is not
1606 a MEM, don't do anything. */
1607
1608 void
1609 alpha_set_memflags (rtx seq, rtx ref)
1610 {
1611 rtx insn;
1612
1613 if (!MEM_P (ref))
1614 return;
1615
1616 /* This is only called from alpha.md, after having had something
1617 generated from one of the insn patterns. So if everything is
1618 zero, the pattern is already up-to-date. */
1619 if (!MEM_VOLATILE_P (ref)
1620 && !MEM_IN_STRUCT_P (ref)
1621 && !MEM_SCALAR_P (ref)
1622 && !MEM_NOTRAP_P (ref)
1623 && !MEM_READONLY_P (ref))
1624 return;
1625
1626 for (insn = seq; insn; insn = NEXT_INSN (insn))
1627 if (INSN_P (insn))
1628 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1629 else
1630 gcc_unreachable ();
1631 }
1632 \f
1633 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1634 int, bool);
1635
1636 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1637 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1638 and return pc_rtx if successful. */
1639
1640 static rtx
1641 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1642 HOST_WIDE_INT c, int n, bool no_output)
1643 {
1644 HOST_WIDE_INT new_const;
1645 int i, bits;
1646 /* Use a pseudo if highly optimizing and still generating RTL. */
1647 rtx subtarget
1648 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1649 rtx temp, insn;
1650
1651 /* If this is a sign-extended 32-bit constant, we can do this in at most
1652 three insns, so do it if we have enough insns left. We always have
1653 a sign-extended 32-bit constant when compiling on a narrow machine. */
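/* For example, c = 0x12348000 gives low = -0x8000 and high = 0x1235,
   so the constant is built as (0x1235 << 16) + (-0x8000): one ldah
   followed by one lda, two insns in total.  */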
1654
1655 if (HOST_BITS_PER_WIDE_INT != 64
1656 || c >> 31 == -1 || c >> 31 == 0)
1657 {
1658 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1659 HOST_WIDE_INT tmp1 = c - low;
1660 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1661 HOST_WIDE_INT extra = 0;
1662
1663 /* If HIGH will be interpreted as negative but the constant is
1664 positive, we must adjust it to do two ldha insns. */
1665
1666 if ((high & 0x8000) != 0 && c >= 0)
1667 {
1668 extra = 0x4000;
1669 tmp1 -= 0x40000000;
1670 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1671 }
1672
1673 if (c == low || (low == 0 && extra == 0))
1674 {
1675 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1676 but that meant that we can't handle INT_MIN on 32-bit machines
1677 (like NT/Alpha), because we recurse indefinitely through
1678 emit_move_insn to gen_movdi. So instead, since we know exactly
1679 what we want, create it explicitly. */
1680
1681 if (no_output)
1682 return pc_rtx;
1683 if (target == NULL)
1684 target = gen_reg_rtx (mode);
1685 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1686 return target;
1687 }
1688 else if (n >= 2 + (extra != 0))
1689 {
1690 if (no_output)
1691 return pc_rtx;
1692 if (!can_create_pseudo_p ())
1693 {
1694 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1695 temp = target;
1696 }
1697 else
1698 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1699 subtarget, mode);
1700
1701 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1702 This means that if we go through expand_binop, we'll try to
1703 generate extensions, etc, which will require new pseudos, which
1704 will fail during some split phases. The SImode add patterns
1705 still exist, but are not named. So build the insns by hand. */
1706
1707 if (extra != 0)
1708 {
1709 if (! subtarget)
1710 subtarget = gen_reg_rtx (mode);
1711 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1712 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1713 emit_insn (insn);
1714 temp = subtarget;
1715 }
1716
1717 if (target == NULL)
1718 target = gen_reg_rtx (mode);
1719 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1720 insn = gen_rtx_SET (VOIDmode, target, insn);
1721 emit_insn (insn);
1722 return target;
1723 }
1724 }
1725
1726 /* If we couldn't do it that way, try some other methods. But if we have
1727 no instructions left, don't bother. Likewise, if this is SImode and
1728 we can't make pseudos, we can't do anything since the expand_binop
1729 and expand_unop calls will widen and try to make pseudos. */
1730
1731 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1732 return 0;
1733
1734 /* Next, see if we can load a related constant and then shift and possibly
1735 negate it to get the constant we want. Try this once each increasing
1736 numbers of insns. */
1737
1738 for (i = 1; i < n; i++)
1739 {
1740 /* First, see if, minus some low bits, we have an easy load of the
1741 high bits. */
1742
1743 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1744 if (new_const != 0)
1745 {
1746 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1747 if (temp)
1748 {
1749 if (no_output)
1750 return temp;
1751 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1752 target, 0, OPTAB_WIDEN);
1753 }
1754 }
1755
1756 /* Next try complementing. */
1757 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1758 if (temp)
1759 {
1760 if (no_output)
1761 return temp;
1762 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1763 }
1764
1765 /* Next try to form a constant and do a left shift. We can do this
1766 if some low-order bits are zero; the exact_log2 call below tells
1767 us that information. The bits we are shifting out could be any
1768 value, but here we'll just try the 0- and sign-extended forms of
1769 the constant. To try to increase the chance of having the same
1770 constant in more than one insn, start at the highest number of
1771 bits to shift, but try all possibilities in case a ZAPNOT will
1772 be useful. */
1773
1774 bits = exact_log2 (c & -c);
1775 if (bits > 0)
1776 for (; bits > 0; bits--)
1777 {
1778 new_const = c >> bits;
1779 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1780 if (!temp && c < 0)
1781 {
1782 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1783 temp = alpha_emit_set_const (subtarget, mode, new_const,
1784 i, no_output);
1785 }
1786 if (temp)
1787 {
1788 if (no_output)
1789 return temp;
1790 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1791 target, 0, OPTAB_WIDEN);
1792 }
1793 }
1794
1795 /* Now try high-order zero bits. Here we try the shifted-in bits as
1796 all zero and all ones. Be careful to avoid shifting outside the
1797 mode and to avoid shifting outside the host wide int size. */
1798 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1799 confuse the recursive call and set all of the high 32 bits. */
1800
1801 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1802 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1803 if (bits > 0)
1804 for (; bits > 0; bits--)
1805 {
1806 new_const = c << bits;
1807 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1808 if (!temp)
1809 {
1810 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1811 temp = alpha_emit_set_const (subtarget, mode, new_const,
1812 i, no_output);
1813 }
1814 if (temp)
1815 {
1816 if (no_output)
1817 return temp;
1818 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1819 target, 1, OPTAB_WIDEN);
1820 }
1821 }
1822
1823 /* Now try high-order 1 bits. We get that with a sign-extension.
1824 But one bit isn't enough here. Be careful to avoid shifting outside
1825 the mode and to avoid shifting outside the host wide int size. */
1826
1827 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1828 - floor_log2 (~ c) - 2);
1829 if (bits > 0)
1830 for (; bits > 0; bits--)
1831 {
1832 new_const = c << bits;
1833 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1834 if (!temp)
1835 {
1836 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1837 temp = alpha_emit_set_const (subtarget, mode, new_const,
1838 i, no_output);
1839 }
1840 if (temp)
1841 {
1842 if (no_output)
1843 return temp;
1844 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1845 target, 0, OPTAB_WIDEN);
1846 }
1847 }
1848 }
1849
1850 #if HOST_BITS_PER_WIDE_INT == 64
1851 /* Finally, see if we can load a value into the target that is the same as the
1852 constant except that all bytes that are 0 are changed to be 0xff. If we
1853 can, then we can do a ZAPNOT to obtain the desired constant. */
1854
1855 new_const = c;
1856 for (i = 0; i < 64; i += 8)
1857 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1858 new_const |= (HOST_WIDE_INT) 0xff << i;
1859
1860 /* We are only called for SImode and DImode. If this is SImode, ensure that
1861 we are sign extended to a full word. */
1862
1863 if (mode == SImode)
1864 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1865
1866 if (new_const != c)
1867 {
1868 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1869 if (temp)
1870 {
1871 if (no_output)
1872 return temp;
1873 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1874 target, 0, OPTAB_WIDEN);
1875 }
1876 }
1877 #endif
1878
1879 return 0;
1880 }
1881
1882 /* Try to output insns to set TARGET equal to the constant C if it can be
1883 done in at most N insns. Do all computations in MODE. Returns the place
1884 where the output has been placed if it can be done and the insns have been
1885 emitted. If it would take more than N insns, zero is returned and no
1886 insns are emitted. */
1887
1888 static rtx
1889 alpha_emit_set_const (rtx target, enum machine_mode mode,
1890 HOST_WIDE_INT c, int n, bool no_output)
1891 {
1892 enum machine_mode orig_mode = mode;
1893 rtx orig_target = target;
1894 rtx result = 0;
1895 int i;
1896
1897 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1898 can't load this constant in one insn, do this in DImode. */
1899 if (!can_create_pseudo_p () && mode == SImode
1900 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1901 {
1902 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1903 if (result)
1904 return result;
1905
1906 target = no_output ? NULL : gen_lowpart (DImode, target);
1907 mode = DImode;
1908 }
1909 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1910 {
1911 target = no_output ? NULL : gen_lowpart (DImode, target);
1912 mode = DImode;
1913 }
1914
1915 /* Try 1 insn, then 2, then up to N. */
1916 for (i = 1; i <= n; i++)
1917 {
1918 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1919 if (result)
1920 {
1921 rtx insn, set;
1922
1923 if (no_output)
1924 return result;
1925
1926 insn = get_last_insn ();
1927 set = single_set (insn);
1928 if (! CONSTANT_P (SET_SRC (set)))
1929 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1930 break;
1931 }
1932 }
1933
1934 /* Allow for the case where we changed the mode of TARGET. */
1935 if (result)
1936 {
1937 if (result == target)
1938 result = orig_target;
1939 else if (mode != orig_mode)
1940 result = gen_lowpart (orig_mode, result);
1941 }
1942
1943 return result;
1944 }
1945
1946 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1947 fall back to a straightforward decomposition. We do this to avoid
1948 exponential run times encountered when looking for longer sequences
1949 with alpha_emit_set_const. */
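/* A worked example (hypothetical constant, 64-bit host case, register
   names are placeholders): for c1 = 0x1234567800009000 the decomposition
   below yields d1 = -0x7000, d2 = 0x00010000, d3 = 0x5678,
   d4 = 0x12340000, so the emitted sequence is roughly

	ldah $t,0x1234($31)
	lda $t,0x5678($t)
	sll $t,32,$t
	ldah $t,1($t)
	lda $t,-28672($t)

   i.e. the high half is built from d4 and d3, shifted into place, and
   d2 and d1 are added back in (-28672 == -0x7000).  */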
1950
1951 static rtx
1952 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1953 {
1954 HOST_WIDE_INT d1, d2, d3, d4;
1955
1956 /* Decompose the entire word. */
1957 #if HOST_BITS_PER_WIDE_INT >= 64
1958 gcc_assert (c2 == -(c1 < 0));
1959 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1960 c1 -= d1;
1961 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1962 c1 = (c1 - d2) >> 32;
1963 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1964 c1 -= d3;
1965 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1966 gcc_assert (c1 == d4);
1967 #else
1968 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1969 c1 -= d1;
1970 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1971 gcc_assert (c1 == d2);
1972 c2 += (d2 < 0);
1973 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1974 c2 -= d3;
1975 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1976 gcc_assert (c2 == d4);
1977 #endif
1978
1979 /* Construct the high word. */
1980 if (d4)
1981 {
1982 emit_move_insn (target, GEN_INT (d4));
1983 if (d3)
1984 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1985 }
1986 else
1987 emit_move_insn (target, GEN_INT (d3));
1988
1989 /* Shift it into place. */
1990 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1991
1992 /* Add in the low bits. */
1993 if (d2)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1995 if (d1)
1996 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1997
1998 return target;
1999 }
2000
2001 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2002 the low 64 bits. */
2003
2004 static void
2005 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2006 {
2007 HOST_WIDE_INT i0, i1;
2008
2009 if (GET_CODE (x) == CONST_VECTOR)
2010 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2011
2012
2013 if (CONST_INT_P (x))
2014 {
2015 i0 = INTVAL (x);
2016 i1 = -(i0 < 0);
2017 }
2018 else if (HOST_BITS_PER_WIDE_INT >= 64)
2019 {
2020 i0 = CONST_DOUBLE_LOW (x);
2021 i1 = -(i0 < 0);
2022 }
2023 else
2024 {
2025 i0 = CONST_DOUBLE_LOW (x);
2026 i1 = CONST_DOUBLE_HIGH (x);
2027 }
2028
2029 *p0 = i0;
2030 *p1 = i1;
2031 }
2032
2033 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2034 are willing to load the value into a register via a move pattern.
2035 Normally this is all symbolic constants, integral constants that
2036 take three or fewer instructions, and floating-point zero. */
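/* For instance (values chosen only for illustration), 0x12345678 is
   accepted because ldah+lda build it in two insns, while a 64-bit
   pattern needing more than three insns is rejected here unless
   TARGET_BUILD_CONSTANTS allows the long decomposition.  */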
2037
2038 bool
2039 alpha_legitimate_constant_p (rtx x)
2040 {
2041 enum machine_mode mode = GET_MODE (x);
2042 HOST_WIDE_INT i0, i1;
2043
2044 switch (GET_CODE (x))
2045 {
2046 case LABEL_REF:
2047 case HIGH:
2048 return true;
2049
2050 case CONST:
2051 if (GET_CODE (XEXP (x, 0)) == PLUS
2052 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
2053 x = XEXP (XEXP (x, 0), 0);
2054 else
2055 return true;
2056
2057 if (GET_CODE (x) != SYMBOL_REF)
2058 return true;
2059
2060 /* FALLTHRU */
2061
2062 case SYMBOL_REF:
2063 /* TLS symbols are never valid. */
2064 return SYMBOL_REF_TLS_MODEL (x) == 0;
2065
2066 case CONST_DOUBLE:
2067 if (x == CONST0_RTX (mode))
2068 return true;
2069 if (FLOAT_MODE_P (mode))
2070 return false;
2071 goto do_integer;
2072
2073 case CONST_VECTOR:
2074 if (x == CONST0_RTX (mode))
2075 return true;
2076 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2077 return false;
2078 if (GET_MODE_SIZE (mode) != 8)
2079 return false;
2080 goto do_integer;
2081
2082 case CONST_INT:
2083 do_integer:
2084 if (TARGET_BUILD_CONSTANTS)
2085 return true;
2086 alpha_extract_integer (x, &i0, &i1);
2087 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2088 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2089 return false;
2090
2091 default:
2092 return false;
2093 }
2094 }
2095
2096 /* Operand 1 is known to be a constant, and should require more than one
2097 instruction to load. Emit that multi-part load. */
2098
2099 bool
2100 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2101 {
2102 HOST_WIDE_INT i0, i1;
2103 rtx temp = NULL_RTX;
2104
2105 alpha_extract_integer (operands[1], &i0, &i1);
2106
2107 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2108 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2109
2110 if (!temp && TARGET_BUILD_CONSTANTS)
2111 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2112
2113 if (temp)
2114 {
2115 if (!rtx_equal_p (operands[0], temp))
2116 emit_move_insn (operands[0], temp);
2117 return true;
2118 }
2119
2120 return false;
2121 }
2122
2123 /* Expand a move instruction; return true if all work is done.
2124 We don't handle non-bwx subword loads here. */
2125
2126 bool
2127 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2128 {
2129 rtx tmp;
2130
2131 /* If the output is not a register, the input must be. */
2132 if (MEM_P (operands[0])
2133 && ! reg_or_0_operand (operands[1], mode))
2134 operands[1] = force_reg (mode, operands[1]);
2135
2136 /* Allow legitimize_address to perform some simplifications. */
2137 if (mode == Pmode && symbolic_operand (operands[1], mode))
2138 {
2139 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2140 if (tmp)
2141 {
2142 if (tmp == operands[0])
2143 return true;
2144 operands[1] = tmp;
2145 return false;
2146 }
2147 }
2148
2149 /* Early out for non-constants and valid constants. */
2150 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2151 return false;
2152
2153 /* Split large integers. */
2154 if (CONST_INT_P (operands[1])
2155 || GET_CODE (operands[1]) == CONST_DOUBLE
2156 || GET_CODE (operands[1]) == CONST_VECTOR)
2157 {
2158 if (alpha_split_const_mov (mode, operands))
2159 return true;
2160 }
2161
2162 /* Otherwise we've nothing left but to drop the thing to memory. */
2163 tmp = force_const_mem (mode, operands[1]);
2164
2165 if (tmp == NULL_RTX)
2166 return false;
2167
2168 if (reload_in_progress)
2169 {
2170 emit_move_insn (operands[0], XEXP (tmp, 0));
2171 operands[1] = replace_equiv_address (tmp, operands[0]);
2172 }
2173 else
2174 operands[1] = validize_mem (tmp);
2175 return false;
2176 }
2177
2178 /* Expand a non-bwx QImode or HImode move instruction;
2179 return true if all work is done. */
2180
2181 bool
2182 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2183 {
2184 rtx seq;
2185
2186 /* If the output is not a register, the input must be. */
2187 if (MEM_P (operands[0]))
2188 operands[1] = force_reg (mode, operands[1]);
2189
2190 /* Handle four memory cases, unaligned and aligned for either the input
2191 or the output. The only case where we can be called during reload is
2192 for aligned loads; all other cases require temporaries. */
2193
2194 if (any_memory_operand (operands[1], mode))
2195 {
2196 if (aligned_memory_operand (operands[1], mode))
2197 {
2198 if (reload_in_progress)
2199 {
2200 if (mode == QImode)
2201 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2202 else
2203 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2204 emit_insn (seq);
2205 }
2206 else
2207 {
2208 rtx aligned_mem, bitnum;
2209 rtx scratch = gen_reg_rtx (SImode);
2210 rtx subtarget;
2211 bool copyout;
2212
2213 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2214
2215 subtarget = operands[0];
2216 if (REG_P (subtarget))
2217 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2218 else
2219 subtarget = gen_reg_rtx (DImode), copyout = true;
2220
2221 if (mode == QImode)
2222 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2223 bitnum, scratch);
2224 else
2225 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2226 bitnum, scratch);
2227 emit_insn (seq);
2228
2229 if (copyout)
2230 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2231 }
2232 }
2233 else
2234 {
2235 /* Don't pass these as parameters since that makes the generated
2236 code depend on parameter evaluation order which will cause
2237 bootstrap failures. */
2238
2239 rtx temp1, temp2, subtarget, ua;
2240 bool copyout;
2241
2242 temp1 = gen_reg_rtx (DImode);
2243 temp2 = gen_reg_rtx (DImode);
2244
2245 subtarget = operands[0];
2246 if (REG_P (subtarget))
2247 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2248 else
2249 subtarget = gen_reg_rtx (DImode), copyout = true;
2250
2251 ua = get_unaligned_address (operands[1]);
2252 if (mode == QImode)
2253 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2254 else
2255 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2256
2257 alpha_set_memflags (seq, operands[1]);
2258 emit_insn (seq);
2259
2260 if (copyout)
2261 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2262 }
2263 return true;
2264 }
2265
2266 if (any_memory_operand (operands[0], mode))
2267 {
2268 if (aligned_memory_operand (operands[0], mode))
2269 {
2270 rtx aligned_mem, bitnum;
2271 rtx temp1 = gen_reg_rtx (SImode);
2272 rtx temp2 = gen_reg_rtx (SImode);
2273
2274 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2275
2276 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2277 temp1, temp2));
2278 }
2279 else
2280 {
2281 rtx temp1 = gen_reg_rtx (DImode);
2282 rtx temp2 = gen_reg_rtx (DImode);
2283 rtx temp3 = gen_reg_rtx (DImode);
2284 rtx ua = get_unaligned_address (operands[0]);
2285
2286 if (mode == QImode)
2287 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2288 else
2289 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2290
2291 alpha_set_memflags (seq, operands[0]);
2292 emit_insn (seq);
2293 }
2294 return true;
2295 }
2296
2297 return false;
2298 }
2299
2300 /* Implement the movmisalign patterns. One of the operands is a memory
2301 that is not naturally aligned. Emit instructions to load or store it. */
2302
2303 void
2304 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2305 {
2306 /* Honor misaligned loads; we promised to handle them. */
2307 if (MEM_P (operands[1]))
2308 {
2309 rtx tmp;
2310
2311 if (register_operand (operands[0], mode))
2312 tmp = operands[0];
2313 else
2314 tmp = gen_reg_rtx (mode);
2315
2316 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2317 if (tmp != operands[0])
2318 emit_move_insn (operands[0], tmp);
2319 }
2320 else if (MEM_P (operands[0]))
2321 {
2322 if (!reg_or_0_operand (operands[1], mode))
2323 operands[1] = force_reg (mode, operands[1]);
2324 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2325 }
2326 else
2327 gcc_unreachable ();
2328 }
2329
2330 /* Generate an unsigned DImode to FP conversion. This is the same code
2331 optabs would emit if we didn't have TFmode patterns.
2332
2333 For SFmode, this is the only construction I've found that can pass
2334 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2335 intermediates will work, because you'll get intermediate rounding
2336 that ruins the end result. Some of this could be fixed by turning
2337 on round-to-positive-infinity, but that requires diddling the fpsr,
2338 which kills performance. I tried turning this around and converting
2339 to a negative number, so that I could turn on /m, but either I did
2340 it wrong or there's something else, because I wound up with the exact
2341 same single-bit error. There is a branch-less form of this same code:
2342
2343 srl $16,1,$1
2344 and $16,1,$2
2345 cmplt $16,0,$3
2346 or $1,$2,$2
2347 cmovge $16,$16,$2
2348 itoft $3,$f10
2349 itoft $2,$f11
2350 cvtqs $f11,$f11
2351 adds $f11,$f11,$f0
2352 fcmoveq $f10,$f11,$f0
2353
2354 I'm not using it because it's the same number of instructions as
2355 this branch-full form, and it has more serialized long latency
2356 instructions on the critical path.
2357
2358 For DFmode, we can avoid rounding errors by breaking up the word
2359 into two pieces, converting them separately, and adding them back:
2360
2361 LC0: .long 0,0x5f800000
2362
2363 itoft $16,$f11
2364 lda $2,LC0
2365 cmplt $16,0,$1
2366 cpyse $f11,$f31,$f10
2367 cpyse $f31,$f11,$f11
2368 s4addq $1,$2,$1
2369 lds $f12,0($1)
2370 cvtqt $f10,$f10
2371 cvtqt $f11,$f11
2372 addt $f12,$f10,$f0
2373 addt $f0,$f11,$f0
2374
2375 This doesn't seem to be a clear-cut win over the optabs form.
2376 It probably all depends on the distribution of numbers being
2377 converted -- in the optabs form, all but high-bit-set has a
2378 much lower minimum execution time. */
2379
2380 void
2381 alpha_emit_floatuns (rtx operands[2])
2382 {
2383 rtx neglab, donelab, i0, i1, f0, in, out;
2384 enum machine_mode mode;
2385
2386 out = operands[0];
2387 in = force_reg (DImode, operands[1]);
2388 mode = GET_MODE (out);
2389 neglab = gen_label_rtx ();
2390 donelab = gen_label_rtx ();
2391 i0 = gen_reg_rtx (DImode);
2392 i1 = gen_reg_rtx (DImode);
2393 f0 = gen_reg_rtx (mode);
2394
2395 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2396
2397 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2398 emit_jump_insn (gen_jump (donelab));
2399 emit_barrier ();
2400
2401 emit_label (neglab);
2402
2403 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2404 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2405 emit_insn (gen_iordi3 (i0, i0, i1));
2406 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2407 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2408
2409 emit_label (donelab);
2410 }
2411
2412 /* Generate the comparison for a conditional branch. */
2413
2414 void
2415 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2416 {
2417 enum rtx_code cmp_code, branch_code;
2418 enum machine_mode branch_mode = VOIDmode;
2419 enum rtx_code code = GET_CODE (operands[0]);
2420 rtx op0 = operands[1], op1 = operands[2];
2421 rtx tem;
2422
2423 if (cmp_mode == TFmode)
2424 {
2425 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2426 op1 = const0_rtx;
2427 cmp_mode = DImode;
2428 }
2429
2430 /* The general case: fold the comparison code to the types of compares
2431 that we have, choosing the branch as necessary. */
2432 switch (code)
2433 {
2434 case EQ: case LE: case LT: case LEU: case LTU:
2435 case UNORDERED:
2436 /* We have these compares: */
2437 cmp_code = code, branch_code = NE;
2438 break;
2439
2440 case NE:
2441 case ORDERED:
2442 /* These must be reversed. */
2443 cmp_code = reverse_condition (code), branch_code = EQ;
2444 break;
2445
2446 case GE: case GT: case GEU: case GTU:
2447 /* For FP, we swap them, for INT, we reverse them. */
2448 if (cmp_mode == DFmode)
2449 {
2450 cmp_code = swap_condition (code);
2451 branch_code = NE;
2452 tem = op0, op0 = op1, op1 = tem;
2453 }
2454 else
2455 {
2456 cmp_code = reverse_condition (code);
2457 branch_code = EQ;
2458 }
2459 break;
2460
2461 default:
2462 gcc_unreachable ();
2463 }
2464
2465 if (cmp_mode == DFmode)
2466 {
2467 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2468 {
2469 /* When we are not as concerned about non-finite values, and we
2470 are comparing against zero, we can branch directly. */
2471 if (op1 == CONST0_RTX (DFmode))
2472 cmp_code = UNKNOWN, branch_code = code;
2473 else if (op0 == CONST0_RTX (DFmode))
2474 {
2475 /* Undo the swap we probably did just above. */
2476 tem = op0, op0 = op1, op1 = tem;
2477 branch_code = swap_condition (cmp_code);
2478 cmp_code = UNKNOWN;
2479 }
2480 }
2481 else
2482 {
2483 /* ??? We mark the branch mode to be CCmode to prevent the
2484 compare and branch from being combined, since the compare
2485 insn follows IEEE rules that the branch does not. */
2486 branch_mode = CCmode;
2487 }
2488 }
2489 else
2490 {
2491 /* The following optimizations are only for signed compares. */
2492 if (code != LEU && code != LTU && code != GEU && code != GTU)
2493 {
2494 /* Whee. Compare and branch against 0 directly. */
2495 if (op1 == const0_rtx)
2496 cmp_code = UNKNOWN, branch_code = code;
2497
2498 /* If the constant doesn't fit into an immediate, but can
2499 be generated by lda/ldah, we adjust the argument and
2500 compare against zero, so we can use beq/bne directly. */
2501 /* ??? Don't do this when comparing against symbols, otherwise
2502 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2503 be declared false out of hand (at least for non-weak). */
2504 else if (CONST_INT_P (op1)
2505 && (code == EQ || code == NE)
2506 && !(symbolic_operand (op0, VOIDmode)
2507 || (REG_P (op0) && REG_POINTER (op0))))
2508 {
2509 rtx n_op1 = GEN_INT (-INTVAL (op1));
2510
2511 if (! satisfies_constraint_I (op1)
2512 && (satisfies_constraint_K (n_op1)
2513 || satisfies_constraint_L (n_op1)))
2514 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2515 }
2516 }
2517
2518 if (!reg_or_0_operand (op0, DImode))
2519 op0 = force_reg (DImode, op0);
2520 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2521 op1 = force_reg (DImode, op1);
2522 }
2523
2524 /* Emit an initial compare instruction, if necessary. */
2525 tem = op0;
2526 if (cmp_code != UNKNOWN)
2527 {
2528 tem = gen_reg_rtx (cmp_mode);
2529 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2530 }
2531
2532 /* Emit the branch instruction. */
2533 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2534 gen_rtx_IF_THEN_ELSE (VOIDmode,
2535 gen_rtx_fmt_ee (branch_code,
2536 branch_mode, tem,
2537 CONST0_RTX (cmp_mode)),
2538 gen_rtx_LABEL_REF (VOIDmode,
2539 operands[3]),
2540 pc_rtx));
2541 emit_jump_insn (tem);
2542 }
2543
2544 /* Certain simplifications can be done to make invalid setcc operations
2545 valid. Return true on success, false if the setcc cannot be emitted. */
2546
2547 bool
2548 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2549 {
2550 enum rtx_code cmp_code;
2551 enum rtx_code code = GET_CODE (operands[1]);
2552 rtx op0 = operands[2], op1 = operands[3];
2553 rtx tmp;
2554
2555 if (cmp_mode == TFmode)
2556 {
2557 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2558 op1 = const0_rtx;
2559 cmp_mode = DImode;
2560 }
2561
2562 if (cmp_mode == DFmode && !TARGET_FIX)
2563 return 0;
2564
2565 /* The general case: fold the comparison code to the types of compares
2566 that we have, choosing the branch as necessary. */
2567
2568 cmp_code = UNKNOWN;
2569 switch (code)
2570 {
2571 case EQ: case LE: case LT: case LEU: case LTU:
2572 case UNORDERED:
2573 /* We have these compares. */
2574 if (cmp_mode == DFmode)
2575 cmp_code = code, code = NE;
2576 break;
2577
2578 case NE:
2579 if (cmp_mode == DImode && op1 == const0_rtx)
2580 break;
2581 /* FALLTHRU */
2582
2583 case ORDERED:
2584 cmp_code = reverse_condition (code);
2585 code = EQ;
2586 break;
2587
2588 case GE: case GT: case GEU: case GTU:
2589 /* These normally need swapping, but for integer zero we have
2590 special patterns that recognize swapped operands. */
2591 if (cmp_mode == DImode && op1 == const0_rtx)
2592 break;
2593 code = swap_condition (code);
2594 if (cmp_mode == DFmode)
2595 cmp_code = code, code = NE;
2596 tmp = op0, op0 = op1, op1 = tmp;
2597 break;
2598
2599 default:
2600 gcc_unreachable ();
2601 }
2602
2603 if (cmp_mode == DImode)
2604 {
2605 if (!register_operand (op0, DImode))
2606 op0 = force_reg (DImode, op0);
2607 if (!reg_or_8bit_operand (op1, DImode))
2608 op1 = force_reg (DImode, op1);
2609 }
2610
2611 /* Emit an initial compare instruction, if necessary. */
2612 if (cmp_code != UNKNOWN)
2613 {
2614 tmp = gen_reg_rtx (cmp_mode);
2615 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2616 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2617
2618 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2619 op1 = const0_rtx;
2620 }
2621
2622 /* Emit the setcc instruction. */
2623 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2624 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2625 return true;
2626 }
2627
2628
2629 /* Rewrite a comparison against zero CMP of the form
2630 (CODE (cc0) (const_int 0)) so it can be written validly in
2631 a conditional move (if_then_else CMP ...).
2632 If both of the operands that set cc0 are nonzero we must emit
2633 an insn to perform the compare (it can't be done within
2634 the conditional move). */
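/* For example (purely illustrative): for a DImode (lt x y) in which
   neither operand is zero, the comparison cannot appear inside the
   if_then_else, so we emit a cmplt into a temporary below and return
   (ne temp 0) for the conditional move to test.  */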
2635
2636 rtx
2637 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2638 {
2639 enum rtx_code code = GET_CODE (cmp);
2640 enum rtx_code cmov_code = NE;
2641 rtx op0 = XEXP (cmp, 0);
2642 rtx op1 = XEXP (cmp, 1);
2643 enum machine_mode cmp_mode
2644 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2645 enum machine_mode cmov_mode = VOIDmode;
2646 int local_fast_math = flag_unsafe_math_optimizations;
2647 rtx tem;
2648
2649 if (cmp_mode == TFmode)
2650 {
2651 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2652 op1 = const0_rtx;
2653 cmp_mode = DImode;
2654 }
2655
2656 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2657
2658 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2659 {
2660 enum rtx_code cmp_code;
2661
2662 if (! TARGET_FIX)
2663 return 0;
2664
2665 /* If we have fp<->int register move instructions, do a cmov by
2666 performing the comparison in fp registers, and move the
2667 zero/nonzero value to integer registers, where we can then
2668 use a normal cmov, or vice-versa. */
2669
2670 switch (code)
2671 {
2672 case EQ: case LE: case LT: case LEU: case LTU:
2673 /* We have these compares. */
2674 cmp_code = code, code = NE;
2675 break;
2676
2677 case NE:
2678 /* This must be reversed. */
2679 cmp_code = EQ, code = EQ;
2680 break;
2681
2682 case GE: case GT: case GEU: case GTU:
2683 /* These normally need swapping, but for integer zero we have
2684 special patterns that recognize swapped operands. */
2685 if (cmp_mode == DImode && op1 == const0_rtx)
2686 cmp_code = code, code = NE;
2687 else
2688 {
2689 cmp_code = swap_condition (code);
2690 code = NE;
2691 tem = op0, op0 = op1, op1 = tem;
2692 }
2693 break;
2694
2695 default:
2696 gcc_unreachable ();
2697 }
2698
2699 tem = gen_reg_rtx (cmp_mode);
2700 emit_insn (gen_rtx_SET (VOIDmode, tem,
2701 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2702 op0, op1)));
2703
2704 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2705 op0 = gen_lowpart (cmp_mode, tem);
2706 op1 = CONST0_RTX (cmp_mode);
2707 local_fast_math = 1;
2708 }
2709
2710 /* We may be able to use a conditional move directly.
2711 This avoids emitting spurious compares. */
2712 if (signed_comparison_operator (cmp, VOIDmode)
2713 && (cmp_mode == DImode || local_fast_math)
2714 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2715 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2716
2717 /* We can't put the comparison inside the conditional move;
2718 emit a compare instruction and put that inside the
2719 conditional move. Make sure we emit only comparisons we have;
2720 swap or reverse as necessary. */
2721
2722 if (!can_create_pseudo_p ())
2723 return NULL_RTX;
2724
2725 switch (code)
2726 {
2727 case EQ: case LE: case LT: case LEU: case LTU:
2728 /* We have these compares: */
2729 break;
2730
2731 case NE:
2732 /* This must be reversed. */
2733 code = reverse_condition (code);
2734 cmov_code = EQ;
2735 break;
2736
2737 case GE: case GT: case GEU: case GTU:
2738 /* These must be swapped. */
2739 if (op1 != CONST0_RTX (cmp_mode))
2740 {
2741 code = swap_condition (code);
2742 tem = op0, op0 = op1, op1 = tem;
2743 }
2744 break;
2745
2746 default:
2747 gcc_unreachable ();
2748 }
2749
2750 if (cmp_mode == DImode)
2751 {
2752 if (!reg_or_0_operand (op0, DImode))
2753 op0 = force_reg (DImode, op0);
2754 if (!reg_or_8bit_operand (op1, DImode))
2755 op1 = force_reg (DImode, op1);
2756 }
2757
2758 /* ??? We mark the branch mode to be CCmode to prevent the compare
2759 and cmov from being combined, since the compare insn follows IEEE
2760 rules that the cmov does not. */
2761 if (cmp_mode == DFmode && !local_fast_math)
2762 cmov_mode = CCmode;
2763
2764 tem = gen_reg_rtx (cmp_mode);
2765 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2766 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2767 }
2768
2769 /* Simplify a conditional move of two constants into a setcc with
2770 arithmetic. This is done with a splitter since combine would
2771 just undo the work if done during code generation. It also catches
2772 cases we wouldn't have before cse. */
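/* Illustrative cases (constants are hypothetical): with t == 8, f == 0
   the 0/1 comparison result is simply shifted left by 3; with t == 5,
   f == 1 (diff == 4) the result is scaled by 4 and f added back, which
   matches a single s4addq.  */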
2773
2774 int
2775 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2776 rtx t_rtx, rtx f_rtx)
2777 {
2778 HOST_WIDE_INT t, f, diff;
2779 enum machine_mode mode;
2780 rtx target, subtarget, tmp;
2781
2782 mode = GET_MODE (dest);
2783 t = INTVAL (t_rtx);
2784 f = INTVAL (f_rtx);
2785 diff = t - f;
2786
2787 if (((code == NE || code == EQ) && diff < 0)
2788 || (code == GE || code == GT))
2789 {
2790 code = reverse_condition (code);
2791 diff = t, t = f, f = diff;
2792 diff = t - f;
2793 }
2794
2795 subtarget = target = dest;
2796 if (mode != DImode)
2797 {
2798 target = gen_lowpart (DImode, dest);
2799 if (can_create_pseudo_p ())
2800 subtarget = gen_reg_rtx (DImode);
2801 else
2802 subtarget = target;
2803 }
2804 /* Below, we must be careful to use copy_rtx on target and subtarget
2805 in intermediate insns, as they may be a subreg rtx, which may not
2806 be shared. */
2807
2808 if (f == 0 && exact_log2 (diff) > 0
2809 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2810 viable over a longer latency cmove. On EV5, the E0 slot is a
2811 scarce resource, and on EV4 shift has the same latency as a cmove. */
2812 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2813 {
2814 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2815 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2816
2817 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2818 GEN_INT (exact_log2 (t)));
2819 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2820 }
2821 else if (f == 0 && t == -1)
2822 {
2823 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2824 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2825
2826 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2827 }
2828 else if (diff == 1 || diff == 4 || diff == 8)
2829 {
2830 rtx add_op;
2831
2832 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2833 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2834
2835 if (diff == 1)
2836 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2837 else
2838 {
2839 add_op = GEN_INT (f);
2840 if (sext_add_operand (add_op, mode))
2841 {
2842 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2843 GEN_INT (diff));
2844 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2845 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2846 }
2847 else
2848 return 0;
2849 }
2850 }
2851 else
2852 return 0;
2853
2854 return 1;
2855 }
2856 \f
2857 /* Look up the function X_floating library function name for the
2858 given operation. */
2859
2860 struct GTY(()) xfloating_op
2861 {
2862 const enum rtx_code code;
2863 const char *const GTY((skip)) osf_func;
2864 const char *const GTY((skip)) vms_func;
2865 rtx libcall;
2866 };
2867
2868 static GTY(()) struct xfloating_op xfloating_ops[] =
2869 {
2870 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2871 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2872 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2873 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2874 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2875 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2876 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2877 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2878 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2879 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2880 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2881 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2882 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2883 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2884 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2885 };
2886
2887 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2888 {
2889 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2890 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2891 };
2892
2893 static rtx
2894 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2895 {
2896 struct xfloating_op *ops = xfloating_ops;
2897 long n = ARRAY_SIZE (xfloating_ops);
2898 long i;
2899
2900 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2901
2902 /* How irritating. Nothing to key off for the main table. */
2903 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2904 {
2905 ops = vax_cvt_ops;
2906 n = ARRAY_SIZE (vax_cvt_ops);
2907 }
2908
2909 for (i = 0; i < n; ++i, ++ops)
2910 if (ops->code == code)
2911 {
2912 rtx func = ops->libcall;
2913 if (!func)
2914 {
2915 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2916 ? ops->vms_func : ops->osf_func);
2917 ops->libcall = func;
2918 }
2919 return func;
2920 }
2921
2922 gcc_unreachable ();
2923 }
2924
2925 /* Most X_floating operations take the rounding mode as an argument.
2926 Compute that here. */
2927
2928 static int
2929 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2930 enum alpha_fp_rounding_mode round)
2931 {
2932 int mode;
2933
2934 switch (round)
2935 {
2936 case ALPHA_FPRM_NORM:
2937 mode = 2;
2938 break;
2939 case ALPHA_FPRM_MINF:
2940 mode = 1;
2941 break;
2942 case ALPHA_FPRM_CHOP:
2943 mode = 0;
2944 break;
2945 case ALPHA_FPRM_DYN:
2946 mode = 4;
2947 break;
2948 default:
2949 gcc_unreachable ();
2950
2951 /* XXX For reference, round to +inf is mode = 3. */
2952 }
2953
2954 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2955 mode |= 0x10000;
2956
2957 return mode;
2958 }
2959
2960 /* Emit an X_floating library function call.
2961
2962 Note that these functions do not follow normal calling conventions:
2963 TFmode arguments are passed in two integer registers (as opposed to
2964 indirect); TFmode return values appear in R16+R17.
2965
2966 FUNC is the function to call.
2967 TARGET is where the output belongs.
2968 OPERANDS are the inputs.
2969 NOPERANDS is the count of inputs.
2970 EQUIV is the expression equivalent for the function.
2971 */
2972
2973 static void
2974 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2975 int noperands, rtx equiv)
2976 {
2977 rtx usage = NULL_RTX, tmp, reg;
2978 int regno = 16, i;
2979
2980 start_sequence ();
2981
2982 for (i = 0; i < noperands; ++i)
2983 {
2984 switch (GET_MODE (operands[i]))
2985 {
2986 case TFmode:
2987 reg = gen_rtx_REG (TFmode, regno);
2988 regno += 2;
2989 break;
2990
2991 case DFmode:
2992 reg = gen_rtx_REG (DFmode, regno + 32);
2993 regno += 1;
2994 break;
2995
2996 case VOIDmode:
2997 gcc_assert (CONST_INT_P (operands[i]));
2998 /* FALLTHRU */
2999 case DImode:
3000 reg = gen_rtx_REG (DImode, regno);
3001 regno += 1;
3002 break;
3003
3004 default:
3005 gcc_unreachable ();
3006 }
3007
3008 emit_move_insn (reg, operands[i]);
3009 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3010 }
3011
3012 switch (GET_MODE (target))
3013 {
3014 case TFmode:
3015 reg = gen_rtx_REG (TFmode, 16);
3016 break;
3017 case DFmode:
3018 reg = gen_rtx_REG (DFmode, 32);
3019 break;
3020 case DImode:
3021 reg = gen_rtx_REG (DImode, 0);
3022 break;
3023 default:
3024 gcc_unreachable ();
3025 }
3026
3027 tmp = gen_rtx_MEM (QImode, func);
3028 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3029 const0_rtx, const0_rtx));
3030 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3031 RTL_CONST_CALL_P (tmp) = 1;
3032
3033 tmp = get_insns ();
3034 end_sequence ();
3035
3036 emit_libcall_block (tmp, target, reg, equiv);
3037 }
3038
3039 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3040
3041 void
3042 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3043 {
3044 rtx func;
3045 int mode;
3046 rtx out_operands[3];
3047
3048 func = alpha_lookup_xfloating_lib_func (code);
3049 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3050
3051 out_operands[0] = operands[1];
3052 out_operands[1] = operands[2];
3053 out_operands[2] = GEN_INT (mode);
3054 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3055 gen_rtx_fmt_ee (code, TFmode, operands[1],
3056 operands[2]));
3057 }
3058
3059 /* Emit an X_floating library function call for a comparison. */
3060
3061 static rtx
3062 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3063 {
3064 enum rtx_code cmp_code, res_code;
3065 rtx func, out, operands[2], note;
3066
3067 /* X_floating library comparison functions return
3068 -1 unordered
3069 0 false
3070 1 true
3071 Convert the compare against the raw return value. */
3072
3073 cmp_code = *pcode;
3074 switch (cmp_code)
3075 {
3076 case UNORDERED:
3077 cmp_code = EQ;
3078 res_code = LT;
3079 break;
3080 case ORDERED:
3081 cmp_code = EQ;
3082 res_code = GE;
3083 break;
3084 case NE:
3085 res_code = NE;
3086 break;
3087 case EQ:
3088 case LT:
3089 case GT:
3090 case LE:
3091 case GE:
3092 res_code = GT;
3093 break;
3094 default:
3095 gcc_unreachable ();
3096 }
3097 *pcode = res_code;
3098
3099 func = alpha_lookup_xfloating_lib_func (cmp_code);
3100
3101 operands[0] = op0;
3102 operands[1] = op1;
3103 out = gen_reg_rtx (DImode);
3104
3105 /* What's actually returned is -1,0,1, not a proper boolean value,
3106 so use an EXPR_LIST as with a generic libcall instead of a
3107 comparison type expression. */
3108 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3109 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3110 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3111 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3112
3113 return out;
3114 }
3115
3116 /* Emit an X_floating library function call for a conversion. */
3117
3118 void
3119 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3120 {
3121 int noperands = 1, mode;
3122 rtx out_operands[2];
3123 rtx func;
3124 enum rtx_code code = orig_code;
3125
3126 if (code == UNSIGNED_FIX)
3127 code = FIX;
3128
3129 func = alpha_lookup_xfloating_lib_func (code);
3130
3131 out_operands[0] = operands[1];
3132
3133 switch (code)
3134 {
3135 case FIX:
3136 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3137 out_operands[1] = GEN_INT (mode);
3138 noperands = 2;
3139 break;
3140 case FLOAT_TRUNCATE:
3141 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3142 out_operands[1] = GEN_INT (mode);
3143 noperands = 2;
3144 break;
3145 default:
3146 break;
3147 }
3148
3149 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3150 gen_rtx_fmt_e (orig_code,
3151 GET_MODE (operands[0]),
3152 operands[1]));
3153 }
3154
3155 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3156 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3157 guarantee that the sequence
3158 set (OP[0] OP[2])
3159 set (OP[1] OP[3])
3160 is valid. Naturally, output operand ordering is little-endian.
3161 This is used by *movtf_internal and *movti_internal. */
3162
3163 void
3164 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3165 bool fixup_overlap)
3166 {
3167 switch (GET_CODE (operands[1]))
3168 {
3169 case REG:
3170 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3171 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3172 break;
3173
3174 case MEM:
3175 operands[3] = adjust_address (operands[1], DImode, 8);
3176 operands[2] = adjust_address (operands[1], DImode, 0);
3177 break;
3178
3179 case CONST_INT:
3180 case CONST_DOUBLE:
3181 gcc_assert (operands[1] == CONST0_RTX (mode));
3182 operands[2] = operands[3] = const0_rtx;
3183 break;
3184
3185 default:
3186 gcc_unreachable ();
3187 }
3188
3189 switch (GET_CODE (operands[0]))
3190 {
3191 case REG:
3192 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3193 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3194 break;
3195
3196 case MEM:
3197 operands[1] = adjust_address (operands[0], DImode, 8);
3198 operands[0] = adjust_address (operands[0], DImode, 0);
3199 break;
3200
3201 default:
3202 gcc_unreachable ();
3203 }
3204
3205 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3206 {
3207 rtx tmp;
3208 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3209 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3210 }
3211 }
3212
3213 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3214 op2 is a register containing the sign bit, operation is the
3215 logical operation to be performed. */
3216
3217 void
3218 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3219 {
3220 rtx high_bit = operands[2];
3221 rtx scratch;
3222 int move;
3223
3224 alpha_split_tmode_pair (operands, TFmode, false);
3225
3226 /* Detect three flavors of operand overlap. */
3227 move = 1;
3228 if (rtx_equal_p (operands[0], operands[2]))
3229 move = 0;
3230 else if (rtx_equal_p (operands[1], operands[2]))
3231 {
3232 if (rtx_equal_p (operands[0], high_bit))
3233 move = 2;
3234 else
3235 move = -1;
3236 }
3237
3238 if (move < 0)
3239 emit_move_insn (operands[0], operands[2]);
3240
3241 /* ??? If the destination overlaps both source tf and high_bit, then
3242 assume source tf is dead in its entirety and use the other half
3243 for a scratch register. Otherwise "scratch" is just the proper
3244 destination register. */
3245 scratch = operands[move < 2 ? 1 : 3];
3246
3247 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3248
3249 if (move > 0)
3250 {
3251 emit_move_insn (operands[0], operands[2]);
3252 if (move > 1)
3253 emit_move_insn (operands[1], scratch);
3254 }
3255 }
3256 \f
3257 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3258 unaligned data:
3259
3260 unsigned: signed:
3261 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3262 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3263 lda r3,X(r11) lda r3,X+2(r11)
3264 extwl r1,r3,r1 extql r1,r3,r1
3265 extwh r2,r3,r2 extqh r2,r3,r2
3266 or r1,r2,r1 or r1,r2,r1
3267 sra r1,48,r1
3268
3269 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3270 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3271 lda r3,X(r11) lda r3,X(r11)
3272 extll r1,r3,r1 extll r1,r3,r1
3273 extlh r2,r3,r2 extlh r2,r3,r2
3274 or r1,r2,r1 addl r1,r2,r1
3275
3276 quad: ldq_u r1,X(r11)
3277 ldq_u r2,X+7(r11)
3278 lda r3,X(r11)
3279 extql r1,r3,r1
3280 extqh r2,r3,r2
3281 or r1,r2,r1
3282 */
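/* To make the quad case concrete (the offset is illustrative): if the
   address ends in 3, the datum straddles two aligned quadwords; extql
   shifts the first quad right by 3 bytes so its upper 5 bytes land at
   the bottom, extqh shifts the second quad left by 5 bytes so its low
   3 bytes land at the top, and the final or merges the two halves.  */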
3283
3284 void
3285 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3286 HOST_WIDE_INT ofs, int sign)
3287 {
3288 rtx meml, memh, addr, extl, exth, tmp, mema;
3289 enum machine_mode mode;
3290
3291 if (TARGET_BWX && size == 2)
3292 {
3293 meml = adjust_address (mem, QImode, ofs);
3294 memh = adjust_address (mem, QImode, ofs+1);
3295 extl = gen_reg_rtx (DImode);
3296 exth = gen_reg_rtx (DImode);
3297 emit_insn (gen_zero_extendqidi2 (extl, meml));
3298 emit_insn (gen_zero_extendqidi2 (exth, memh));
3299 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3300 NULL, 1, OPTAB_LIB_WIDEN);
3301 addr = expand_simple_binop (DImode, IOR, extl, exth,
3302 NULL, 1, OPTAB_LIB_WIDEN);
3303
3304 if (sign && GET_MODE (tgt) != HImode)
3305 {
3306 addr = gen_lowpart (HImode, addr);
3307 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3308 }
3309 else
3310 {
3311 if (GET_MODE (tgt) != DImode)
3312 addr = gen_lowpart (GET_MODE (tgt), addr);
3313 emit_move_insn (tgt, addr);
3314 }
3315 return;
3316 }
3317
3318 meml = gen_reg_rtx (DImode);
3319 memh = gen_reg_rtx (DImode);
3320 addr = gen_reg_rtx (DImode);
3321 extl = gen_reg_rtx (DImode);
3322 exth = gen_reg_rtx (DImode);
3323
3324 mema = XEXP (mem, 0);
3325 if (GET_CODE (mema) == LO_SUM)
3326 mema = force_reg (Pmode, mema);
3327
3328 /* AND addresses cannot be in any alias set, since they may implicitly
3329 alias surrounding code. Ideally we'd have some alias set that
3330 covered all types except those with alignment 8 or higher. */
3331
3332 tmp = change_address (mem, DImode,
3333 gen_rtx_AND (DImode,
3334 plus_constant (mema, ofs),
3335 GEN_INT (-8)));
3336 set_mem_alias_set (tmp, 0);
3337 emit_move_insn (meml, tmp);
3338
3339 tmp = change_address (mem, DImode,
3340 gen_rtx_AND (DImode,
3341 plus_constant (mema, ofs + size - 1),
3342 GEN_INT (-8)));
3343 set_mem_alias_set (tmp, 0);
3344 emit_move_insn (memh, tmp);
3345
3346 if (sign && size == 2)
3347 {
3348 emit_move_insn (addr, plus_constant (mema, ofs+2));
3349
3350 emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
3351 emit_insn (gen_extqh (exth, memh, addr));
3352
3353 /* We must use tgt here for the target. Alpha-vms port fails if we use
3354 addr for the target, because addr is marked as a pointer and combine
3355 knows that pointers are always sign-extended 32-bit values. */
3356 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3357 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3358 addr, 1, OPTAB_WIDEN);
3359 }
3360 else
3361 {
3362 emit_move_insn (addr, plus_constant (mema, ofs));
3363 emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
3364 switch ((int) size)
3365 {
3366 case 2:
3367 emit_insn (gen_extwh (exth, memh, addr));
3368 mode = HImode;
3369 break;
3370
3371 case 4:
3372 emit_insn (gen_extlh (exth, memh, addr));
3373 mode = SImode;
3374 break;
3375
3376 case 8:
3377 emit_insn (gen_extqh (exth, memh, addr));
3378 mode = DImode;
3379 break;
3380
3381 default:
3382 gcc_unreachable ();
3383 }
3384
3385 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3386 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3387 sign, OPTAB_WIDEN);
3388 }
3389
3390 if (addr != tgt)
3391 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3392 }
3393
3394 /* Similarly, use ins and msk instructions to perform unaligned stores. */
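/* For the quadword case the expansion below amounts to the Handbook's
   unaligned-store sequence (register numbers are illustrative):

	ldq_u r2,X+7(r11)
	ldq_u r1,X(r11)
	lda r3,X(r11)
	insqh r4,r3,r6
	insql r4,r3,r5
	mskqh r2,r3,r2
	mskql r1,r3,r1
	or r2,r6,r2
	or r1,r5,r1
	stq_u r2,X+7(r11)
	stq_u r1,X(r11)

   where r4 holds the source value and the high word is stored first.  */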
3395
3396 void
3397 alpha_expand_unaligned_store (rtx dst, rtx src,
3398 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3399 {
3400 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3401
3402 if (TARGET_BWX && size == 2)
3403 {
3404 if (src != const0_rtx)
3405 {
3406 dstl = gen_lowpart (QImode, src);
3407 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3408 NULL, 1, OPTAB_LIB_WIDEN);
3409 dsth = gen_lowpart (QImode, dsth);
3410 }
3411 else
3412 dstl = dsth = const0_rtx;
3413
3414 meml = adjust_address (dst, QImode, ofs);
3415 memh = adjust_address (dst, QImode, ofs+1);
3416
3417 emit_move_insn (meml, dstl);
3418 emit_move_insn (memh, dsth);
3419 return;
3420 }
3421
3422 dstl = gen_reg_rtx (DImode);
3423 dsth = gen_reg_rtx (DImode);
3424 insl = gen_reg_rtx (DImode);
3425 insh = gen_reg_rtx (DImode);
3426
3427 dsta = XEXP (dst, 0);
3428 if (GET_CODE (dsta) == LO_SUM)
3429 dsta = force_reg (Pmode, dsta);
3430
3431 /* AND addresses cannot be in any alias set, since they may implicitly
3432 alias surrounding code. Ideally we'd have some alias set that
3433 covered all types except those with alignment 8 or higher. */
3434
3435 meml = change_address (dst, DImode,
3436 gen_rtx_AND (DImode,
3437 plus_constant (dsta, ofs),
3438 GEN_INT (-8)));
3439 set_mem_alias_set (meml, 0);
3440
3441 memh = change_address (dst, DImode,
3442 gen_rtx_AND (DImode,
3443 plus_constant (dsta, ofs + size - 1),
3444 GEN_INT (-8)));
3445 set_mem_alias_set (memh, 0);
3446
3447 emit_move_insn (dsth, memh);
3448 emit_move_insn (dstl, meml);
3449
3450 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3451
3452 if (src != CONST0_RTX (GET_MODE (src)))
3453 {
3454 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3455 GEN_INT (size*8), addr));
3456
3457 switch ((int) size)
3458 {
3459 case 2:
3460 emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
3461 break;
3462 case 4:
3463 emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
3464 break;
3465 case 8:
3466 emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
3467 break;
3468 default:
3469 gcc_unreachable ();
3470 }
3471 }
3472
3473 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3474
3475 switch ((int) size)
3476 {
3477 case 2:
3478 emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
3479 break;
3480 case 4:
3481 {
3482 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3483 emit_insn (gen_mskxl (dstl, dstl, msk, addr));
3484 break;
3485 }
3486 case 8:
3487 emit_insn (gen_mskxl (dstl, dstl, constm1_rtx, addr));
3488 break;
3489 default:
3490 gcc_unreachable ();
3491 }
3492
3493 if (src != CONST0_RTX (GET_MODE (src)))
3494 {
3495 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3496 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3497 }
3498
3499 /* Must store high before low for the degenerate aligned case. */
3500 emit_move_insn (memh, dsth);
3501 emit_move_insn (meml, dstl);
3502 }
3503
3504 /* The block move code tries to maximize speed by separating loads and
3505 stores at the expense of register pressure: we load all of the data
3506 before we store it back out. There are two secondary effects worth
3507 mentioning: this speeds copying to/from aligned and unaligned
3508 buffers, and it makes the code significantly easier to write. */
3509
3510 #define MAX_MOVE_WORDS 8
3511
3512 /* Load an integral number of consecutive unaligned quadwords. */
3513
3514 static void
3515 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3516 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3517 {
3518 rtx const im8 = GEN_INT (-8);
3519 rtx const i64 = GEN_INT (64);
3520 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3521 rtx sreg, areg, tmp, smema;
3522 HOST_WIDE_INT i;
3523
3524 smema = XEXP (smem, 0);
3525 if (GET_CODE (smema) == LO_SUM)
3526 smema = force_reg (Pmode, smema);
3527
3528 /* Generate all the tmp registers we need. */
3529 for (i = 0; i < words; ++i)
3530 {
3531 data_regs[i] = out_regs[i];
3532 ext_tmps[i] = gen_reg_rtx (DImode);
3533 }
3534 data_regs[words] = gen_reg_rtx (DImode);
3535
3536 if (ofs != 0)
3537 smem = adjust_address (smem, GET_MODE (smem), ofs);
3538
3539 /* Load up all of the source data. */
3540 for (i = 0; i < words; ++i)
3541 {
3542 tmp = change_address (smem, DImode,
3543 gen_rtx_AND (DImode,
3544 plus_constant (smema, 8*i),
3545 im8));
3546 set_mem_alias_set (tmp, 0);
3547 emit_move_insn (data_regs[i], tmp);
3548 }
3549
3550 tmp = change_address (smem, DImode,
3551 gen_rtx_AND (DImode,
3552 plus_constant (smema, 8*words - 1),
3553 im8));
3554 set_mem_alias_set (tmp, 0);
3555 emit_move_insn (data_regs[words], tmp);
3556
3557 /* Extract the half-word fragments. Unfortunately DEC decided to make
3558 extxh with offset zero a noop instead of zeroing the register, so
3559 we must take care of that edge condition ourselves with cmov. */
3560
3561 sreg = copy_addr_to_reg (smema);
3562 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3563 1, OPTAB_WIDEN);
3564 for (i = 0; i < words; ++i)
3565 {
3566 emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
3567 emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
3568 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3569 gen_rtx_IF_THEN_ELSE (DImode,
3570 gen_rtx_EQ (DImode, areg,
3571 const0_rtx),
3572 const0_rtx, ext_tmps[i])));
3573 }
3574
3575 /* Merge the half-words into whole words. */
3576 for (i = 0; i < words; ++i)
3577 {
3578 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3579 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3580 }
3581 }
3582
3583 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3584 may be NULL to store zeros. */
3585
3586 static void
3587 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3588 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3589 {
3590 rtx const im8 = GEN_INT (-8);
3591 rtx const i64 = GEN_INT (64);
3592 rtx ins_tmps[MAX_MOVE_WORDS];
3593 rtx st_tmp_1, st_tmp_2, dreg;
3594 rtx st_addr_1, st_addr_2, dmema;
3595 HOST_WIDE_INT i;
3596
3597 dmema = XEXP (dmem, 0);
3598 if (GET_CODE (dmema) == LO_SUM)
3599 dmema = force_reg (Pmode, dmema);
3600
3601 /* Generate all the tmp registers we need. */
3602 if (data_regs != NULL)
3603 for (i = 0; i < words; ++i)
3604 ins_tmps[i] = gen_reg_rtx(DImode);
3605 st_tmp_1 = gen_reg_rtx(DImode);
3606 st_tmp_2 = gen_reg_rtx(DImode);
3607
3608 if (ofs != 0)
3609 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3610
3611 st_addr_2 = change_address (dmem, DImode,
3612 gen_rtx_AND (DImode,
3613 plus_constant (dmema, words*8 - 1),
3614 im8));
3615 set_mem_alias_set (st_addr_2, 0);
3616
3617 st_addr_1 = change_address (dmem, DImode,
3618 gen_rtx_AND (DImode, dmema, im8));
3619 set_mem_alias_set (st_addr_1, 0);
3620
3621 /* Load up the destination end bits. */
3622 emit_move_insn (st_tmp_2, st_addr_2);
3623 emit_move_insn (st_tmp_1, st_addr_1);
3624
3625 /* Shift the input data into place. */
3626 dreg = copy_addr_to_reg (dmema);
3627 if (data_regs != NULL)
3628 {
3629 for (i = words-1; i >= 0; --i)
3630 {
3631 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3632 emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
3633 }
3634 for (i = words-1; i > 0; --i)
3635 {
3636 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3637 ins_tmps[i-1], ins_tmps[i-1], 1,
3638 OPTAB_WIDEN);
3639 }
3640 }
3641
3642 /* Split and merge the ends with the destination data. */
3643 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3644 emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3645
3646 if (data_regs != NULL)
3647 {
3648 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3649 st_tmp_2, 1, OPTAB_WIDEN);
3650 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3651 st_tmp_1, 1, OPTAB_WIDEN);
3652 }
3653
3654 /* Store it all. */
3655 emit_move_insn (st_addr_2, st_tmp_2);
3656 for (i = words-1; i > 0; --i)
3657 {
3658 rtx tmp = change_address (dmem, DImode,
3659 gen_rtx_AND (DImode,
3660 plus_constant (dmema, i*8),
3661 im8));
3662 set_mem_alias_set (tmp, 0);
3663 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3664 }
3665 emit_move_insn (st_addr_1, st_tmp_1);
3666 }
3667
3668
3669 /* Expand string/block move operations.
3670
3671 operands[0] is the pointer to the destination.
3672 operands[1] is the pointer to the source.
3673 operands[2] is the number of bytes to move.
3674 operands[3] is the alignment. */
3675
3676 int
3677 alpha_expand_block_move (rtx operands[])
3678 {
3679 rtx bytes_rtx = operands[2];
3680 rtx align_rtx = operands[3];
3681 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3682 HOST_WIDE_INT bytes = orig_bytes;
3683 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3684 HOST_WIDE_INT dst_align = src_align;
3685 rtx orig_src = operands[1];
3686 rtx orig_dst = operands[0];
3687 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3688 rtx tmp;
3689 unsigned int i, words, ofs, nregs = 0;
3690
3691 if (orig_bytes <= 0)
3692 return 1;
3693 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3694 return 0;
3695
3696 /* Look for additional alignment information from recorded register info. */
3697
3698 tmp = XEXP (orig_src, 0);
3699 if (REG_P (tmp))
3700 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3701 else if (GET_CODE (tmp) == PLUS
3702 && REG_P (XEXP (tmp, 0))
3703 && CONST_INT_P (XEXP (tmp, 1)))
3704 {
3705 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3706 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3707
3708 if (a > src_align)
3709 {
3710 if (a >= 64 && c % 8 == 0)
3711 src_align = 64;
3712 else if (a >= 32 && c % 4 == 0)
3713 src_align = 32;
3714 else if (a >= 16 && c % 2 == 0)
3715 src_align = 16;
3716 }
3717 }
3718
3719 tmp = XEXP (orig_dst, 0);
3720 if (REG_P (tmp))
3721 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3722 else if (GET_CODE (tmp) == PLUS
3723 && REG_P (XEXP (tmp, 0))
3724 && CONST_INT_P (XEXP (tmp, 1)))
3725 {
3726 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3727 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3728
3729 if (a > dst_align)
3730 {
3731 if (a >= 64 && c % 8 == 0)
3732 dst_align = 64;
3733 else if (a >= 32 && c % 4 == 0)
3734 dst_align = 32;
3735 else if (a >= 16 && c % 2 == 0)
3736 dst_align = 16;
3737 }
3738 }
3739
3740 ofs = 0;
3741 if (src_align >= 64 && bytes >= 8)
3742 {
3743 words = bytes / 8;
3744
3745 for (i = 0; i < words; ++i)
3746 data_regs[nregs + i] = gen_reg_rtx (DImode);
3747
3748 for (i = 0; i < words; ++i)
3749 emit_move_insn (data_regs[nregs + i],
3750 adjust_address (orig_src, DImode, ofs + i * 8));
3751
3752 nregs += words;
3753 bytes -= words * 8;
3754 ofs += words * 8;
3755 }
3756
3757 if (src_align >= 32 && bytes >= 4)
3758 {
3759 words = bytes / 4;
3760
3761 for (i = 0; i < words; ++i)
3762 data_regs[nregs + i] = gen_reg_rtx (SImode);
3763
3764 for (i = 0; i < words; ++i)
3765 emit_move_insn (data_regs[nregs + i],
3766 adjust_address (orig_src, SImode, ofs + i * 4));
3767
3768 nregs += words;
3769 bytes -= words * 4;
3770 ofs += words * 4;
3771 }
3772
3773 if (bytes >= 8)
3774 {
3775 words = bytes / 8;
3776
3777 for (i = 0; i < words+1; ++i)
3778 data_regs[nregs + i] = gen_reg_rtx (DImode);
3779
3780 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3781 words, ofs);
3782
3783 nregs += words;
3784 bytes -= words * 8;
3785 ofs += words * 8;
3786 }
3787
3788 if (! TARGET_BWX && bytes >= 4)
3789 {
3790 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3791 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3792 bytes -= 4;
3793 ofs += 4;
3794 }
3795
3796 if (bytes >= 2)
3797 {
3798 if (src_align >= 16)
3799 {
3800 do {
3801 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3802 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3803 bytes -= 2;
3804 ofs += 2;
3805 } while (bytes >= 2);
3806 }
3807 else if (! TARGET_BWX)
3808 {
3809 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3810 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3811 bytes -= 2;
3812 ofs += 2;
3813 }
3814 }
3815
3816 while (bytes > 0)
3817 {
3818 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3819 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3820 bytes -= 1;
3821 ofs += 1;
3822 }
3823
3824 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3825
3826 /* Now save it back out again. */
3827
3828 i = 0, ofs = 0;
3829
3830 /* Write out the data in whatever chunks reading the source allowed. */
3831 if (dst_align >= 64)
3832 {
3833 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3834 {
3835 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3836 data_regs[i]);
3837 ofs += 8;
3838 i++;
3839 }
3840 }
3841
3842 if (dst_align >= 32)
3843 {
3844 /* If the source has remaining DImode regs, write them out in
3845 two pieces. */
3846 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3847 {
3848 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3849 NULL_RTX, 1, OPTAB_WIDEN);
3850
3851 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3852 gen_lowpart (SImode, data_regs[i]));
3853 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3854 gen_lowpart (SImode, tmp));
3855 ofs += 8;
3856 i++;
3857 }
3858
3859 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3860 {
3861 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3862 data_regs[i]);
3863 ofs += 4;
3864 i++;
3865 }
3866 }
3867
3868 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3869 {
3870 /* Write out a remaining block of words using unaligned methods. */
3871
3872 for (words = 1; i + words < nregs; words++)
3873 if (GET_MODE (data_regs[i + words]) != DImode)
3874 break;
3875
3876 if (words == 1)
3877 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3878 else
3879 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3880 words, ofs);
3881
3882 i += words;
3883 ofs += words * 8;
3884 }
3885
3886 /* Due to the above, this won't be aligned. */
3887 /* ??? If we have more than one of these, consider constructing full
3888 words in registers and using alpha_expand_unaligned_store_words. */
3889 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3890 {
3891 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3892 ofs += 4;
3893 i++;
3894 }
3895
3896 if (dst_align >= 16)
3897 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3898 {
3899 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
3900 i++;
3901 ofs += 2;
3902 }
3903 else
3904 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3905 {
3906 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
3907 i++;
3908 ofs += 2;
3909 }
3910
3911 /* The remainder must be byte copies. */
3912 while (i < nregs)
3913 {
3914 gcc_assert (GET_MODE (data_regs[i]) == QImode);
3915 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
3916 i++;
3917 ofs += 1;
3918 }
3919
3920 return 1;
3921 }
3922
3923 int
3924 alpha_expand_block_clear (rtx operands[])
3925 {
3926 rtx bytes_rtx = operands[1];
3927 rtx align_rtx = operands[3];
3928 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3929 HOST_WIDE_INT bytes = orig_bytes;
3930 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
3931 HOST_WIDE_INT alignofs = 0;
3932 rtx orig_dst = operands[0];
3933 rtx tmp;
3934 int i, words, ofs = 0;
3935
3936 if (orig_bytes <= 0)
3937 return 1;
3938 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3939 return 0;
3940
3941 /* Look for stricter alignment. */
3942 tmp = XEXP (orig_dst, 0);
3943 if (REG_P (tmp))
3944 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3945 else if (GET_CODE (tmp) == PLUS
3946 && REG_P (XEXP (tmp, 0))
3947 && CONST_INT_P (XEXP (tmp, 1)))
3948 {
3949 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3950 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3951
3952 if (a > align)
3953 {
3954 if (a >= 64)
3955 align = a, alignofs = 8 - c % 8;
3956 else if (a >= 32)
3957 align = a, alignofs = 4 - c % 4;
3958 else if (a >= 16)
3959 align = a, alignofs = 2 - c % 2;
3960 }
3961 }
3962
3963 /* Handle an unaligned prefix first. */
3964
3965 if (alignofs > 0)
3966 {
3967 #if HOST_BITS_PER_WIDE_INT >= 64
3968 /* Given that alignofs is bounded by align, the only time BWX could
3969 generate three stores is for a 7 byte fill. Prefer two individual
3970 stores over a load/mask/store sequence. */
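/* A worked example of the masking below: with align >= 64 and
   alignofs == 3, inv_alignofs == 5 and mask == 0x000000ffffffffff, so
   the AND keeps the five bytes that precede the region being cleared
   and zeroes the three prefix bytes within the same aligned quadword
   (byte positions here are little-endian).  */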
3971 if ((!TARGET_BWX || alignofs == 7)
3972 && align >= 32
3973 && !(alignofs == 4 && bytes >= 4))
3974 {
3975 enum machine_mode mode = (align >= 64 ? DImode : SImode);
3976 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
3977 rtx mem, tmp;
3978 HOST_WIDE_INT mask;
3979
3980 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
3981 set_mem_alias_set (mem, 0);
3982
3983 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
3984 if (bytes < alignofs)
3985 {
3986 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
3987 ofs += bytes;
3988 bytes = 0;
3989 }
3990 else
3991 {
3992 bytes -= alignofs;
3993 ofs += alignofs;
3994 }
3995 alignofs = 0;
3996
3997 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
3998 NULL_RTX, 1, OPTAB_WIDEN);
3999
4000 emit_move_insn (mem, tmp);
4001 }
4002 #endif
4003
4004 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4005 {
4006 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4007 bytes -= 1;
4008 ofs += 1;
4009 alignofs -= 1;
4010 }
4011 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4012 {
4013 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4014 bytes -= 2;
4015 ofs += 2;
4016 alignofs -= 2;
4017 }
4018 if (alignofs == 4 && bytes >= 4)
4019 {
4020 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4021 bytes -= 4;
4022 ofs += 4;
4023 alignofs = 0;
4024 }
4025
4026 /* If we've not used the extra lead alignment information by now,
4027 we won't be able to. Downgrade align to match what's left over. */
4028 if (alignofs > 0)
4029 {
4030 alignofs = alignofs & -alignofs;
4031 align = MIN (align, alignofs * BITS_PER_UNIT);
4032 }
4033 }
4034
4035 /* Handle a block of contiguous long-words. */
4036
4037 if (align >= 64 && bytes >= 8)
4038 {
4039 words = bytes / 8;
4040
4041 for (i = 0; i < words; ++i)
4042 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4043 const0_rtx);
4044
4045 bytes -= words * 8;
4046 ofs += words * 8;
4047 }
4048
4049 /* If the block is large and appropriately aligned, emit a single
4050 store followed by a sequence of stq_u insns. */
4051
4052 if (align >= 32 && bytes > 16)
4053 {
4054 rtx orig_dsta;
4055
4056 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4057 bytes -= 4;
4058 ofs += 4;
4059
4060 orig_dsta = XEXP (orig_dst, 0);
4061 if (GET_CODE (orig_dsta) == LO_SUM)
4062 orig_dsta = force_reg (Pmode, orig_dsta);
4063
4064 words = bytes / 8;
4065 for (i = 0; i < words; ++i)
4066 {
4067 rtx mem
4068 = change_address (orig_dst, DImode,
4069 gen_rtx_AND (DImode,
4070 plus_constant (orig_dsta, ofs + i*8),
4071 GEN_INT (-8)));
4072 set_mem_alias_set (mem, 0);
4073 emit_move_insn (mem, const0_rtx);
4074 }
4075
4076 /* Depending on the alignment, the first stq_u may have overlapped
4077 with the initial stl, which means that the last stq_u didn't
4078 write as much as it would appear. Leave those questionable bytes
4079 unaccounted for. */
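/* Concretely (an illustrative case): if the stl above happened to land
   on an 8-byte boundary, the first stq_u covers that same quadword and
   merely re-clears the stl's four bytes, so the run of stq_u insns only
   clears words * 8 - 4 bytes beyond the stl; the adjustment below
   conservatively assumes that case.  */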
4080 bytes -= words * 8 - 4;
4081 ofs += words * 8 - 4;
4082 }
4083
4084 /* Handle a smaller block of aligned words. */
4085
4086 if ((align >= 64 && bytes == 4)
4087 || (align == 32 && bytes >= 4))
4088 {
4089 words = bytes / 4;
4090
4091 for (i = 0; i < words; ++i)
4092 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4093 const0_rtx);
4094
4095 bytes -= words * 4;
4096 ofs += words * 4;
4097 }
4098
4099 /* An unaligned block uses stq_u stores for as many as possible. */
4100
4101 if (bytes >= 8)
4102 {
4103 words = bytes / 8;
4104
4105 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4106
4107 bytes -= words * 8;
4108 ofs += words * 8;
4109 }
4110
4111 /* Next clean up any trailing pieces. */
4112
4113 #if HOST_BITS_PER_WIDE_INT >= 64
4114 /* Count the number of bits in BYTES for which aligned stores could
4115 be emitted. */
4116 words = 0;
4117 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4118 if (bytes & i)
4119 words += 1;
4120
4121 /* If we have appropriate alignment (and it wouldn't take too many
4122 instructions otherwise), mask out the bytes we need. */
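/* For instance, with BWX, bytes == 7 and align >= 64, words == 3 (at
   minimum a longword, a word and a byte store could be emitted), so a
   single ldq/and/stq of the trailing quadword is used instead.  */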
4123 if (TARGET_BWX ? words > 2 : bytes > 0)
4124 {
4125 if (align >= 64)
4126 {
4127 rtx mem, tmp;
4128 HOST_WIDE_INT mask;
4129
4130 mem = adjust_address (orig_dst, DImode, ofs);
4131 set_mem_alias_set (mem, 0);
4132
4133 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4134
4135 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4136 NULL_RTX, 1, OPTAB_WIDEN);
4137
4138 emit_move_insn (mem, tmp);
4139 return 1;
4140 }
4141 else if (align >= 32 && bytes < 4)
4142 {
4143 rtx mem, tmp;
4144 HOST_WIDE_INT mask;
4145
4146 mem = adjust_address (orig_dst, SImode, ofs);
4147 set_mem_alias_set (mem, 0);
4148
4149 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4150
4151 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4152 NULL_RTX, 1, OPTAB_WIDEN);
4153
4154 emit_move_insn (mem, tmp);
4155 return 1;
4156 }
4157 }
4158 #endif
4159
4160 if (!TARGET_BWX && bytes >= 4)
4161 {
4162 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4163 bytes -= 4;
4164 ofs += 4;
4165 }
4166
4167 if (bytes >= 2)
4168 {
4169 if (align >= 16)
4170 {
4171 do {
4172 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4173 const0_rtx);
4174 bytes -= 2;
4175 ofs += 2;
4176 } while (bytes >= 2);
4177 }
4178 else if (! TARGET_BWX)
4179 {
4180 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4181 bytes -= 2;
4182 ofs += 2;
4183 }
4184 }
4185
4186 while (bytes > 0)
4187 {
4188 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4189 bytes -= 1;
4190 ofs += 1;
4191 }
4192
4193 return 1;
4194 }
4195
4196 /* Returns a mask so that zap(x, value) == x & mask. */
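/* For example, value == 0x0f selects bytes 0-3 for zeroing, so the
   returned mask is 0xffffffff00000000 and x & mask clears exactly the
   bytes that zap would clear.  */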
4197
4198 rtx
4199 alpha_expand_zap_mask (HOST_WIDE_INT value)
4200 {
4201 rtx result;
4202 int i;
4203
4204 if (HOST_BITS_PER_WIDE_INT >= 64)
4205 {
4206 HOST_WIDE_INT mask = 0;
4207
4208 for (i = 7; i >= 0; --i)
4209 {
4210 mask <<= 8;
4211 if (!((value >> i) & 1))
4212 mask |= 0xff;
4213 }
4214
4215 result = gen_int_mode (mask, DImode);
4216 }
4217 else
4218 {
4219 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4220
4221 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4222
4223 for (i = 7; i >= 4; --i)
4224 {
4225 mask_hi <<= 8;
4226 if (!((value >> i) & 1))
4227 mask_hi |= 0xff;
4228 }
4229
4230 for (i = 3; i >= 0; --i)
4231 {
4232 mask_lo <<= 8;
4233 if (!((value >> i) & 1))
4234 mask_lo |= 0xff;
4235 }
4236
4237 result = immed_double_const (mask_lo, mask_hi, DImode);
4238 }
4239
4240 return result;
4241 }
4242
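/* Emit the vector operation GEN on operands coerced to MODE;
   const0_rtx operands are rewritten as CONST0_RTX (MODE), the zero of
   the vector mode.  */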
4243 void
4244 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4245 enum machine_mode mode,
4246 rtx op0, rtx op1, rtx op2)
4247 {
4248 op0 = gen_lowpart (mode, op0);
4249
4250 if (op1 == const0_rtx)
4251 op1 = CONST0_RTX (mode);
4252 else
4253 op1 = gen_lowpart (mode, op1);
4254
4255 if (op2 == const0_rtx)
4256 op2 = CONST0_RTX (mode);
4257 else
4258 op2 = gen_lowpart (mode, op2);
4259
4260 emit_insn ((*gen) (op0, op1, op2));
4261 }
4262
4263 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4264 COND is true. Mark the jump as unlikely to be taken. */
4265
4266 static void
4267 emit_unlikely_jump (rtx cond, rtx label)
4268 {
4269 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4270 rtx x;
4271
4272 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4273 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4274 add_reg_note (x, REG_BR_PROB, very_unlikely);
4275 }
4276
4277 /* A subroutine of the atomic operation splitters. Emit a load-locked
4278 instruction in MODE. */
4279
4280 static void
4281 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4282 {
4283 rtx (*fn) (rtx, rtx) = NULL;
4284 if (mode == SImode)
4285 fn = gen_load_locked_si;
4286 else if (mode == DImode)
4287 fn = gen_load_locked_di;
4288 emit_insn (fn (reg, mem));
4289 }
4290
4291 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4292 instruction in MODE. */
4293
4294 static void
4295 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4296 {
4297 rtx (*fn) (rtx, rtx, rtx) = NULL;
4298 if (mode == SImode)
4299 fn = gen_store_conditional_si;
4300 else if (mode == DImode)
4301 fn = gen_store_conditional_di;
4302 emit_insn (fn (res, mem, val));
4303 }
4304
4305 /* A subroutine of the atomic operation splitters. Emit an insxl
4306 instruction in MODE. */
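/* (insbl/inswl take the low byte or word of OP1, shift it to the byte
   position selected by the low three bits of OP2, and zero the rest of
   the result; the callers then OR that value into a suitably masked
   quadword.)  */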
4307
4308 static rtx
4309 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4310 {
4311 rtx ret = gen_reg_rtx (DImode);
4312 rtx (*fn) (rtx, rtx, rtx);
4313
4314 if (mode == QImode)
4315 fn = gen_insbl;
4316 else
4317 fn = gen_inswl;
4318
4319 /* The insbl and inswl patterns require a register operand. */
4320 op1 = force_reg (mode, op1);
4321 emit_insn (fn (ret, op1, op2));
4322
4323 return ret;
4324 }
4325
4326 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4327 to perform. MEM is the memory on which to operate. VAL is the second
4328 operand of the binary operator. BEFORE and AFTER are optional locations to
4329 return the value of MEM either before or after the operation. SCRATCH is
4330 a scratch register. */
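/* The emitted sequence has roughly this shape (an illustrative sketch,
   not the exact assembly; <op> stands for the requested operation):

	mb
   1:	ld<x>_l  scratch,MEM
	<op>     scratch,VAL,scratch
	st<x>_c  scratch,MEM
	beq      scratch,1b
	mb
*/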
4331
4332 void
4333 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4334 rtx before, rtx after, rtx scratch)
4335 {
4336 enum machine_mode mode = GET_MODE (mem);
4337 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4338
4339 emit_insn (gen_memory_barrier ());
4340
4341 label = gen_label_rtx ();
4342 emit_label (label);
4343 label = gen_rtx_LABEL_REF (DImode, label);
4344
4345 if (before == NULL)
4346 before = scratch;
4347 emit_load_locked (mode, before, mem);
4348
4349 if (code == NOT)
4350 {
4351 x = gen_rtx_AND (mode, before, val);
4352 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4353
4354 x = gen_rtx_NOT (mode, val);
4355 }
4356 else
4357 x = gen_rtx_fmt_ee (code, mode, before, val);
4358 if (after)
4359 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4360 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4361
4362 emit_store_conditional (mode, cond, mem, scratch);
4363
4364 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4365 emit_unlikely_jump (x, label);
4366
4367 emit_insn (gen_memory_barrier ());
4368 }
4369
4370 /* Expand a compare and swap operation. */
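/* Roughly (an illustrative sketch of the generated loop, for the
   general nonzero-OLDVAL case):

	mb
   1:	ld<x>_l  retval,MEM
	cmpeq    retval,OLDVAL,cond
	beq      cond,2f
	mov      NEWVAL,scratch
	st<x>_c  scratch,MEM
	beq      scratch,1b
	mb
   2:
*/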
4371
4372 void
4373 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4374 rtx scratch)
4375 {
4376 enum machine_mode mode = GET_MODE (mem);
4377 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4378
4379 emit_insn (gen_memory_barrier ());
4380
4381 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4382 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4383 emit_label (XEXP (label1, 0));
4384
4385 emit_load_locked (mode, retval, mem);
4386
4387 x = gen_lowpart (DImode, retval);
4388 if (oldval == const0_rtx)
4389 x = gen_rtx_NE (DImode, x, const0_rtx);
4390 else
4391 {
4392 x = gen_rtx_EQ (DImode, x, oldval);
4393 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4394 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4395 }
4396 emit_unlikely_jump (x, label2);
4397
4398 emit_move_insn (scratch, newval);
4399 emit_store_conditional (mode, cond, mem, scratch);
4400
4401 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4402 emit_unlikely_jump (x, label1);
4403
4404 emit_insn (gen_memory_barrier ());
4405 emit_label (XEXP (label2, 0));
4406 }
4407
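/* Expand a compare-and-swap on a QImode or HImode MEM.  The narrow
   NEWVAL is positioned within its aligned quadword with insbl/inswl,
   and the real work is done on DImode by the sync_compare_and_swap*_1
   pattern, which is split by alpha_split_compare_and_swap_12 below.  */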
4408 void
4409 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4410 {
4411 enum machine_mode mode = GET_MODE (mem);
4412 rtx addr, align, wdst;
4413 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4414
4415 addr = force_reg (DImode, XEXP (mem, 0));
4416 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4417 NULL_RTX, 1, OPTAB_DIRECT);
4418
4419 oldval = convert_modes (DImode, mode, oldval, 1);
4420 newval = emit_insxl (mode, newval, addr);
4421
4422 wdst = gen_reg_rtx (DImode);
4423 if (mode == QImode)
4424 fn5 = gen_sync_compare_and_swapqi_1;
4425 else
4426 fn5 = gen_sync_compare_and_swaphi_1;
4427 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4428
4429 emit_move_insn (dst, gen_lowpart (mode, wdst));
4430 }
4431
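/* Split the QImode/HImode compare-and-swap: load the containing aligned
   quadword locked, extract and compare the narrow field, and on a match
   merge NEWVAL in with mskxl/ior before the store-conditional.  */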
4432 void
4433 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4434 rtx oldval, rtx newval, rtx align,
4435 rtx scratch, rtx cond)
4436 {
4437 rtx label1, label2, mem, width, mask, x;
4438
4439 mem = gen_rtx_MEM (DImode, align);
4440 MEM_VOLATILE_P (mem) = 1;
4441
4442 emit_insn (gen_memory_barrier ());
4443 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4444 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4445 emit_label (XEXP (label1, 0));
4446
4447 emit_load_locked (DImode, scratch, mem);
4448
4449 width = GEN_INT (GET_MODE_BITSIZE (mode));
4450 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4451 emit_insn (gen_extxl (dest, scratch, width, addr));
4452
4453 if (oldval == const0_rtx)
4454 x = gen_rtx_NE (DImode, dest, const0_rtx);
4455 else
4456 {
4457 x = gen_rtx_EQ (DImode, dest, oldval);
4458 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4459 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4460 }
4461 emit_unlikely_jump (x, label2);
4462
4463 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4464 emit_insn (gen_iordi3 (scratch, scratch, newval));
4465
4466 emit_store_conditional (DImode, scratch, mem, scratch);
4467
4468 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4469 emit_unlikely_jump (x, label1);
4470
4471 emit_insn (gen_memory_barrier ());
4472 emit_label (XEXP (label2, 0));
4473 }
4474
4475 /* Expand an atomic exchange operation. */
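/* Note that, unlike the splitters above, no leading memory barrier is
   emitted here: __sync_lock_test_and_set only requires acquire
   semantics, so the barrier after the store-conditional loop
   suffices.  */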
4476
4477 void
4478 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4479 {
4480 enum machine_mode mode = GET_MODE (mem);
4481 rtx label, x, cond = gen_lowpart (DImode, scratch);
4482
4483 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4484 emit_label (XEXP (label, 0));
4485
4486 emit_load_locked (mode, retval, mem);
4487 emit_move_insn (scratch, val);
4488 emit_store_conditional (mode, cond, mem, scratch);
4489
4490 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4491 emit_unlikely_jump (x, label);
4492
4493 emit_insn (gen_memory_barrier ());
4494 }
4495
4496 void
4497 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4498 {
4499 enum machine_mode mode = GET_MODE (mem);
4500 rtx addr, align, wdst;
4501 rtx (*fn4) (rtx, rtx, rtx, rtx);
4502
4503 /* Force the address into a register. */
4504 addr = force_reg (DImode, XEXP (mem, 0));
4505
4506 /* Align it to a multiple of 8. */
4507 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4508 NULL_RTX, 1, OPTAB_DIRECT);
4509
4510 /* Insert val into the correct byte location within the word. */
4511 val = emit_insxl (mode, val, addr);
4512
4513 wdst = gen_reg_rtx (DImode);
4514 if (mode == QImode)
4515 fn4 = gen_sync_lock_test_and_setqi_1;
4516 else
4517 fn4 = gen_sync_lock_test_and_sethi_1;
4518 emit_insn (fn4 (wdst, addr, val, align));
4519
4520 emit_move_insn (dst, gen_lowpart (mode, wdst));
4521 }
4522
4523 void
4524 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4525 rtx val, rtx align, rtx scratch)
4526 {
4527 rtx label, mem, width, mask, x;
4528
4529 mem = gen_rtx_MEM (DImode, align);
4530 MEM_VOLATILE_P (mem) = 1;
4531
4532 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4533 emit_label (XEXP (label, 0));
4534
4535 emit_load_locked (DImode, scratch, mem);
4536
4537 width = GEN_INT (GET_MODE_BITSIZE (mode));
4538 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4539 emit_insn (gen_extxl (dest, scratch, width, addr));
4540 emit_insn (gen_mskxl (scratch, scratch, mask, addr));
4541 emit_insn (gen_iordi3 (scratch, scratch, val));
4542
4543 emit_store_conditional (DImode, scratch, mem, scratch);
4544
4545 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4546 emit_unlikely_jump (x, label);
4547
4548 emit_insn (gen_memory_barrier ());
4549 }
4550 \f
4551 /* Adjust the cost of a scheduling dependency. Return the new cost of
4552 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4553
4554 static int
4555 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4556 {
4557 enum attr_type dep_insn_type;
4558
4559 /* If the dependence is an anti-dependence, there is no cost. For an
4560 output dependence, there is sometimes a cost, but it doesn't seem
4561 worth handling those few cases. */
4562 if (REG_NOTE_KIND (link) != 0)
4563 return cost;
4564
4565 /* If we can't recognize the insns, we can't really do anything. */
4566 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4567 return cost;
4568
4569 dep_insn_type = get_attr_type (dep_insn);
4570
4571 /* Bring in the user-defined memory latency. */
4572 if (dep_insn_type == TYPE_ILD
4573 || dep_insn_type == TYPE_FLD
4574 || dep_insn_type == TYPE_LDSYM)
4575 cost += alpha_memory_latency-1;
4576
4577 /* Everything else handled in DFA bypasses now. */
4578
4579 return cost;
4580 }
4581
4582 /* The number of instructions that can be issued per cycle. */
4583
4584 static int
4585 alpha_issue_rate (void)
4586 {
4587 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4588 }
4589
4590 /* How many alternative schedules to try. This should be as wide as the
4591 scheduling freedom in the DFA, but no wider. Making this value too
4592 large results in extra work for the scheduler.
4593
4594 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4595 alternative schedules. For EV5, we can choose between E0/E1 and
4596 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4597
4598 static int
4599 alpha_multipass_dfa_lookahead (void)
4600 {
4601 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4602 }
4603 \f
4604 /* Machine-specific function data. */
4605
4606 struct GTY(()) machine_function
4607 {
4608 /* For OSF. */
4609 const char *some_ld_name;
4610
4611 /* For TARGET_LD_BUGGY_LDGP. */
4612 struct rtx_def *gp_save_rtx;
4613
4614 /* For VMS condition handlers. */
4615 bool uses_condition_handler;
4616 };
4617
4618 /* How to allocate a 'struct machine_function'. */
4619
4620 static struct machine_function *
4621 alpha_init_machine_status (void)
4622 {
4623 return ggc_alloc_cleared_machine_function ();
4624 }
4625
4626 /* Support for frame based VMS condition handlers. */
4627
4628 /* A VMS condition handler may be established for a function with a call to
4629 __builtin_establish_vms_condition_handler, and cancelled with a call to
4630 __builtin_revert_vms_condition_handler.
4631
4632 The VMS Condition Handling Facility knows about the existence of a handler
4633 from the procedure descriptor .handler field. As with the VMS native compilers,
4634 we store the user-specified handler's address at a fixed location in the
4635 stack frame and point the procedure descriptor at a common wrapper which
4636 fetches the real handler's address and issues an indirect call.
4637
4638 The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
4639
4640 We force the procedure kind to PT_STACK, and the fixed frame location is
4641 fp+8, just before the register save area. We use the handler_data field in
4642 the procedure descriptor to state the fp offset at which the installed
4643 handler address can be found. */
4644
4645 #define VMS_COND_HANDLER_FP_OFFSET 8
4646
4647 /* Expand code to store the currently installed user VMS condition handler
4648 into TARGET and install HANDLER as the new condition handler. */
4649
4650 void
4651 alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
4652 {
4653 rtx handler_slot_address
4654 = plus_constant (hard_frame_pointer_rtx, VMS_COND_HANDLER_FP_OFFSET);
4655
4656 rtx handler_slot
4657 = gen_rtx_MEM (DImode, handler_slot_address);
4658
4659 emit_move_insn (target, handler_slot);
4660 emit_move_insn (handler_slot, handler);
4661
4662 /* Notify the start/prologue/epilogue emitters that the condition handler
4663 slot is needed. In addition to reserving the slot space, this will force
4664 the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
4665 use above is correct. */
4666 cfun->machine->uses_condition_handler = true;
4667 }
4668
4669 /* Expand code to store the current VMS condition handler into TARGET and
4670 nullify it. */
4671
4672 void
4673 alpha_expand_builtin_revert_vms_condition_handler (rtx target)
4674 {
4675 /* We implement this by establishing a null condition handler, with the tiny
4676 side effect of setting uses_condition_handler. This is a little bit
4677 pessimistic if no actual builtin_establish call is ever issued, which is
4678 not a real problem and is expected never to happen anyway. */
4679
4680 alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
4681 }
4682
4683 /* Functions to save and restore alpha_return_addr_rtx. */
4684
4685 /* Start the ball rolling with RETURN_ADDR_RTX. */
4686
4687 rtx
4688 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4689 {
4690 if (count != 0)
4691 return const0_rtx;
4692
4693 return get_hard_reg_initial_val (Pmode, REG_RA);
4694 }
4695
4696 /* Return or create a memory slot containing the gp value for the current
4697 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4698
4699 rtx
4700 alpha_gp_save_rtx (void)
4701 {
4702 rtx seq, m = cfun->machine->gp_save_rtx;
4703
4704 if (m == NULL)
4705 {
4706 start_sequence ();
4707
4708 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4709 m = validize_mem (m);
4710 emit_move_insn (m, pic_offset_table_rtx);
4711
4712 seq = get_insns ();
4713 end_sequence ();
4714
4715 /* We used to simply emit the sequence after entry_of_function.
4716 However, this breaks the CFG if the first instruction in the
4717 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4718 label. Emit the sequence properly on the edge. We are only
4719 invoked from dw2_build_landing_pads and finish_eh_generation
4720 will call commit_edge_insertions thanks to a kludge. */
4721 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4722
4723 cfun->machine->gp_save_rtx = m;
4724 }
4725
4726 return m;
4727 }
4728
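/* Return nonzero if the return address register is clobbered somewhere
   in the current function.  When alpha_return_addr has created a copy of
   the incoming $26, df_regs_ever_live_p would be trivially true, so look
   for an explicit set instead.  */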
4729 static int
4730 alpha_ra_ever_killed (void)
4731 {
4732 rtx top;
4733
4734 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4735 return (int)df_regs_ever_live_p (REG_RA);
4736
4737 push_topmost_sequence ();
4738 top = get_insns ();
4739 pop_topmost_sequence ();
4740
4741 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4742 }
4743
4744 \f
4745 /* Return the trap mode suffix applicable to the current
4746 instruction, or NULL. */
4747
4748 static const char *
4749 get_trap_mode_suffix (void)
4750 {
4751 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4752
4753 switch (s)
4754 {
4755 case TRAP_SUFFIX_NONE:
4756 return NULL;
4757
4758 case TRAP_SUFFIX_SU:
4759 if (alpha_fptm >= ALPHA_FPTM_SU)
4760 return "su";
4761 return NULL;
4762
4763 case TRAP_SUFFIX_SUI:
4764 if (alpha_fptm >= ALPHA_FPTM_SUI)
4765 return "sui";
4766 return NULL;
4767
4768 case TRAP_SUFFIX_V_SV:
4769 switch (alpha_fptm)
4770 {
4771 case ALPHA_FPTM_N:
4772 return NULL;
4773 case ALPHA_FPTM_U:
4774 return "v";
4775 case ALPHA_FPTM_SU:
4776 case ALPHA_FPTM_SUI:
4777 return "sv";
4778 default:
4779 gcc_unreachable ();
4780 }
4781
4782 case TRAP_SUFFIX_V_SV_SVI:
4783 switch (alpha_fptm)
4784 {
4785 case ALPHA_FPTM_N:
4786 return NULL;
4787 case ALPHA_FPTM_U:
4788 return "v";
4789 case ALPHA_FPTM_SU:
4790 return "sv";
4791 case ALPHA_FPTM_SUI:
4792 return "svi";
4793 default:
4794 gcc_unreachable ();
4795 }
4796 break;
4797
4798 case TRAP_SUFFIX_U_SU_SUI:
4799 switch (alpha_fptm)
4800 {
4801 case ALPHA_FPTM_N:
4802 return NULL;
4803 case ALPHA_FPTM_U:
4804 return "u";
4805 case ALPHA_FPTM_SU:
4806 return "su";
4807 case ALPHA_FPTM_SUI:
4808 return "sui";
4809 default:
4810 gcc_unreachable ();
4811 }
4812 break;
4813
4814 default:
4815 gcc_unreachable ();
4816 }
4817 gcc_unreachable ();
4818 }
4819
4820 /* Return the rounding mode suffix applicable to the current
4821 instruction, or NULL. */
4822
4823 static const char *
4824 get_round_mode_suffix (void)
4825 {
4826 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4827
4828 switch (s)
4829 {
4830 case ROUND_SUFFIX_NONE:
4831 return NULL;
4832 case ROUND_SUFFIX_NORMAL:
4833 switch (alpha_fprm)
4834 {
4835 case ALPHA_FPRM_NORM:
4836 return NULL;
4837 case ALPHA_FPRM_MINF:
4838 return "m";
4839 case ALPHA_FPRM_CHOP:
4840 return "c";
4841 case ALPHA_FPRM_DYN:
4842 return "d";
4843 default:
4844 gcc_unreachable ();
4845 }
4846 break;
4847
4848 case ROUND_SUFFIX_C:
4849 return "c";
4850
4851 default:
4852 gcc_unreachable ();
4853 }
4854 gcc_unreachable ();
4855 }
4856
4857 /* Locate some local-dynamic symbol still in use by this function
4858 so that we can print its name in some movdi_er_tlsldm pattern. */
4859
4860 static int
4861 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4862 {
4863 rtx x = *px;
4864
4865 if (GET_CODE (x) == SYMBOL_REF
4866 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4867 {
4868 cfun->machine->some_ld_name = XSTR (x, 0);
4869 return 1;
4870 }
4871
4872 return 0;
4873 }
4874
4875 static const char *
4876 get_some_local_dynamic_name (void)
4877 {
4878 rtx insn;
4879
4880 if (cfun->machine->some_ld_name)
4881 return cfun->machine->some_ld_name;
4882
4883 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4884 if (INSN_P (insn)
4885 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4886 return cfun->machine->some_ld_name;
4887
4888 gcc_unreachable ();
4889 }
4890
4891 /* Print an operand. Recognize special options, documented below. */
4892
4893 void
4894 print_operand (FILE *file, rtx x, int code)
4895 {
4896 int i;
4897
4898 switch (code)
4899 {
4900 case '~':
4901 /* Print the assembler name of the current function. */
4902 assemble_name (file, alpha_fnname);
4903 break;
4904
4905 case '&':
4906 assemble_name (file, get_some_local_dynamic_name ());
4907 break;
4908
4909 case '/':
4910 {
4911 const char *trap = get_trap_mode_suffix ();
4912 const char *round = get_round_mode_suffix ();
4913
4914 if (trap || round)
4915 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4916 (trap ? trap : ""), (round ? round : ""));
4917 break;
4918 }
4919
4920 case ',':
4921 /* Generates single precision instruction suffix. */
4922 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4923 break;
4924
4925 case '-':
4926 /* Generates double precision instruction suffix. */
4927 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4928 break;
4929
4930 case '#':
4931 if (alpha_this_literal_sequence_number == 0)
4932 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
4933 fprintf (file, "%d", alpha_this_literal_sequence_number);
4934 break;
4935
4936 case '*':
4937 if (alpha_this_gpdisp_sequence_number == 0)
4938 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
4939 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
4940 break;
4941
4942 case 'H':
4943 if (GET_CODE (x) == HIGH)
4944 output_addr_const (file, XEXP (x, 0));
4945 else
4946 output_operand_lossage ("invalid %%H value");
4947 break;
4948
4949 case 'J':
4950 {
4951 const char *lituse;
4952
4953 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
4954 {
4955 x = XVECEXP (x, 0, 0);
4956 lituse = "lituse_tlsgd";
4957 }
4958 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
4959 {
4960 x = XVECEXP (x, 0, 0);
4961 lituse = "lituse_tlsldm";
4962 }
4963 else if (CONST_INT_P (x))
4964 lituse = "lituse_jsr";
4965 else
4966 {
4967 output_operand_lossage ("invalid %%J value");
4968 break;
4969 }
4970
4971 if (x != const0_rtx)
4972 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4973 }
4974 break;
4975
4976 case 'j':
4977 {
4978 const char *lituse;
4979
4980 #ifdef HAVE_AS_JSRDIRECT_RELOCS
4981 lituse = "lituse_jsrdirect";
4982 #else
4983 lituse = "lituse_jsr";
4984 #endif
4985
4986 gcc_assert (INTVAL (x) != 0);
4987 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
4988 }
4989 break;
4990 case 'r':
4991 /* If this operand is the constant zero, write it as "$31". */
4992 if (REG_P (x))
4993 fprintf (file, "%s", reg_names[REGNO (x)]);
4994 else if (x == CONST0_RTX (GET_MODE (x)))
4995 fprintf (file, "$31");
4996 else
4997 output_operand_lossage ("invalid %%r value");
4998 break;
4999
5000 case 'R':
5001 /* Similar, but for floating-point. */
5002 if (REG_P (x))
5003 fprintf (file, "%s", reg_names[REGNO (x)]);
5004 else if (x == CONST0_RTX (GET_MODE (x)))
5005 fprintf (file, "$f31");
5006 else
5007 output_operand_lossage ("invalid %%R value");
5008 break;
5009
5010 case 'N':
5011 /* Write the 1's complement of a constant. */
5012 if (!CONST_INT_P (x))
5013 output_operand_lossage ("invalid %%N value");
5014
5015 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5016 break;
5017
5018 case 'P':
5019 /* Write 1 << C, for a constant C. */
5020 if (!CONST_INT_P (x))
5021 output_operand_lossage ("invalid %%P value");
5022
5023 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5024 break;
5025
5026 case 'h':
5027 /* Write the high-order 16 bits of a constant, sign-extended. */
5028 if (!CONST_INT_P (x))
5029 output_operand_lossage ("invalid %%h value");
5030
5031 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5032 break;
5033
5034 case 'L':
5035 /* Write the low-order 16 bits of a constant, sign-extended. */
5036 if (!CONST_INT_P (x))
5037 output_operand_lossage ("invalid %%L value");
5038
5039 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5040 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5041 break;
5042
5043 case 'm':
5044 /* Write mask for ZAP insn. */
5045 if (GET_CODE (x) == CONST_DOUBLE)
5046 {
5047 HOST_WIDE_INT mask = 0;
5048 HOST_WIDE_INT value;
5049
5050 value = CONST_DOUBLE_LOW (x);
5051 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5052 i++, value >>= 8)
5053 if (value & 0xff)
5054 mask |= (1 << i);
5055
5056 value = CONST_DOUBLE_HIGH (x);
5057 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5058 i++, value >>= 8)
5059 if (value & 0xff)
5060 mask |= (1 << (i + sizeof (int)));
5061
5062 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5063 }
5064
5065 else if (CONST_INT_P (x))
5066 {
5067 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5068
5069 for (i = 0; i < 8; i++, value >>= 8)
5070 if (value & 0xff)
5071 mask |= (1 << i);
5072
5073 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5074 }
5075 else
5076 output_operand_lossage ("invalid %%m value");
5077 break;
5078
5079 case 'M':
5080 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5081 if (!CONST_INT_P (x)
5082 || (INTVAL (x) != 8 && INTVAL (x) != 16
5083 && INTVAL (x) != 32 && INTVAL (x) != 64))
5084 output_operand_lossage ("invalid %%M value");
5085
5086 fprintf (file, "%s",
5087 (INTVAL (x) == 8 ? "b"
5088 : INTVAL (x) == 16 ? "w"
5089 : INTVAL (x) == 32 ? "l"
5090 : "q"));
5091 break;
5092
5093 case 'U':
5094 /* Similar, except do it from the mask. */
5095 if (CONST_INT_P (x))
5096 {
5097 HOST_WIDE_INT value = INTVAL (x);
5098
5099 if (value == 0xff)
5100 {
5101 fputc ('b', file);
5102 break;
5103 }
5104 if (value == 0xffff)
5105 {
5106 fputc ('w', file);
5107 break;
5108 }
5109 if (value == 0xffffffff)
5110 {
5111 fputc ('l', file);
5112 break;
5113 }
5114 if (value == -1)
5115 {
5116 fputc ('q', file);
5117 break;
5118 }
5119 }
5120 else if (HOST_BITS_PER_WIDE_INT == 32
5121 && GET_CODE (x) == CONST_DOUBLE
5122 && CONST_DOUBLE_LOW (x) == 0xffffffff
5123 && CONST_DOUBLE_HIGH (x) == 0)
5124 {
5125 fputc ('l', file);
5126 break;
5127 }
5128 output_operand_lossage ("invalid %%U value");
5129 break;
5130
5131 case 's':
5132 /* Write the constant value divided by 8. */
5133 if (!CONST_INT_P (x)
5134 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5135 || (INTVAL (x) & 7) != 0)
5136 output_operand_lossage ("invalid %%s value");
5137
5138 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
5139 break;
5140
5141 case 'S':
5142 /* Same, except compute (64 - c) / 8 */
5143
5144 if (!CONST_INT_P (x)
5145 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5146 || (INTVAL (x) & 7) != 0)
5147 output_operand_lossage ("invalid %%S value");
5148
5149 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5150 break;
5151
5152 case 'C': case 'D': case 'c': case 'd':
5153 /* Write out comparison name. */
5154 {
5155 enum rtx_code c = GET_CODE (x);
5156
5157 if (!COMPARISON_P (x))
5158 output_operand_lossage ("invalid %%C value");
5159
5160 else if (code == 'D')
5161 c = reverse_condition (c);
5162 else if (code == 'c')
5163 c = swap_condition (c);
5164 else if (code == 'd')
5165 c = swap_condition (reverse_condition (c));
5166
5167 if (c == LEU)
5168 fprintf (file, "ule");
5169 else if (c == LTU)
5170 fprintf (file, "ult");
5171 else if (c == UNORDERED)
5172 fprintf (file, "un");
5173 else
5174 fprintf (file, "%s", GET_RTX_NAME (c));
5175 }
5176 break;
5177
5178 case 'E':
5179 /* Write the divide or modulus operator. */
5180 switch (GET_CODE (x))
5181 {
5182 case DIV:
5183 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5184 break;
5185 case UDIV:
5186 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5187 break;
5188 case MOD:
5189 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5190 break;
5191 case UMOD:
5192 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5193 break;
5194 default:
5195 output_operand_lossage ("invalid %%E value");
5196 break;
5197 }
5198 break;
5199
5200 case 'A':
5201 /* Write "_u" for unaligned access. */
5202 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5203 fprintf (file, "_u");
5204 break;
5205
5206 case 0:
5207 if (REG_P (x))
5208 fprintf (file, "%s", reg_names[REGNO (x)]);
5209 else if (MEM_P (x))
5210 output_address (XEXP (x, 0));
5211 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5212 {
5213 switch (XINT (XEXP (x, 0), 1))
5214 {
5215 case UNSPEC_DTPREL:
5216 case UNSPEC_TPREL:
5217 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5218 break;
5219 default:
5220 output_operand_lossage ("unknown relocation unspec");
5221 break;
5222 }
5223 }
5224 else
5225 output_addr_const (file, x);
5226 break;
5227
5228 default:
5229 output_operand_lossage ("invalid %%xn code");
5230 }
5231 }
5232
5233 void
5234 print_operand_address (FILE *file, rtx addr)
5235 {
5236 int basereg = 31;
5237 HOST_WIDE_INT offset = 0;
5238
5239 if (GET_CODE (addr) == AND)
5240 addr = XEXP (addr, 0);
5241
5242 if (GET_CODE (addr) == PLUS
5243 && CONST_INT_P (XEXP (addr, 1)))
5244 {
5245 offset = INTVAL (XEXP (addr, 1));
5246 addr = XEXP (addr, 0);
5247 }
5248
5249 if (GET_CODE (addr) == LO_SUM)
5250 {
5251 const char *reloc16, *reloclo;
5252 rtx op1 = XEXP (addr, 1);
5253
5254 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5255 {
5256 op1 = XEXP (op1, 0);
5257 switch (XINT (op1, 1))
5258 {
5259 case UNSPEC_DTPREL:
5260 reloc16 = NULL;
5261 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5262 break;
5263 case UNSPEC_TPREL:
5264 reloc16 = NULL;
5265 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5266 break;
5267 default:
5268 output_operand_lossage ("unknown relocation unspec");
5269 return;
5270 }
5271
5272 output_addr_const (file, XVECEXP (op1, 0, 0));
5273 }
5274 else
5275 {
5276 reloc16 = "gprel";
5277 reloclo = "gprellow";
5278 output_addr_const (file, op1);
5279 }
5280
5281 if (offset)
5282 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5283
5284 addr = XEXP (addr, 0);
5285 switch (GET_CODE (addr))
5286 {
5287 case REG:
5288 basereg = REGNO (addr);
5289 break;
5290
5291 case SUBREG:
5292 basereg = subreg_regno (addr);
5293 break;
5294
5295 default:
5296 gcc_unreachable ();
5297 }
5298
5299 fprintf (file, "($%d)\t\t!%s", basereg,
5300 (basereg == 29 ? reloc16 : reloclo));
5301 return;
5302 }
5303
5304 switch (GET_CODE (addr))
5305 {
5306 case REG:
5307 basereg = REGNO (addr);
5308 break;
5309
5310 case SUBREG:
5311 basereg = subreg_regno (addr);
5312 break;
5313
5314 case CONST_INT:
5315 offset = INTVAL (addr);
5316 break;
5317
5318 #if TARGET_ABI_OPEN_VMS
5319 case SYMBOL_REF:
5320 fprintf (file, "%s", XSTR (addr, 0));
5321 return;
5322
5323 case CONST:
5324 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5325 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5326 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5327 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5328 INTVAL (XEXP (XEXP (addr, 0), 1)));
5329 return;
5330
5331 #endif
5332 default:
5333 gcc_unreachable ();
5334 }
5335
5336 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5337 }
5338 \f
5339 /* Emit RTL insns to initialize the variable parts of a trampoline at
5340 M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
5341 for the static chain value for the function. */
5342
5343 static void
5344 alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
5345 {
5346 rtx fnaddr, mem, word1, word2;
5347
5348 fnaddr = XEXP (DECL_RTL (fndecl), 0);
5349
5350 #ifdef POINTERS_EXTEND_UNSIGNED
5351 fnaddr = convert_memory_address (Pmode, fnaddr);
5352 chain_value = convert_memory_address (Pmode, chain_value);
5353 #endif
5354
5355 if (TARGET_ABI_OPEN_VMS)
5356 {
5357 const char *fnname;
5358 char *trname;
5359
5360 /* Construct the name of the trampoline entry point. */
5361 fnname = XSTR (fnaddr, 0);
5362 trname = (char *) alloca (strlen (fnname) + 5);
5363 strcpy (trname, fnname);
5364 strcat (trname, "..tr");
5365 fnname = ggc_alloc_string (trname, strlen (trname) + 1);
5366 word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
5367
5368 /* Trampoline (or "bounded") procedure descriptor is constructed from
5369 the function's procedure descriptor with certain fields zeroed in accordance
5370 with the VMS calling standard. This is stored in the first quadword. */
5371 word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
5372 word1 = expand_and (DImode, word1, GEN_INT (0xffff0fff0000fff0), NULL);
5373 }
5374 else
5375 {
5376 /* These 4 instructions are:
5377 ldq $1,24($27)
5378 ldq $27,16($27)
5379 jmp $31,($27),0
5380 nop
5381 We don't bother setting the HINT field of the jump; the nop
5382 is merely there for padding. */
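/* Each constant below packs two of those 32-bit instruction words into
   one quadword, first-executed instruction in the low (lower-addressed,
   little-endian) half: e.g. the low half of word1, 0xa43b0018, is the
   encoding of "ldq $1,24($27)".  */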
5383 word1 = GEN_INT (0xa77b0010a43b0018);
5384 word2 = GEN_INT (0x47ff041f6bfb0000);
5385 }
5386
5387 /* Store the first two words, as computed above. */
5388 mem = adjust_address (m_tramp, DImode, 0);
5389 emit_move_insn (mem, word1);
5390 mem = adjust_address (m_tramp, DImode, 8);
5391 emit_move_insn (mem, word2);
5392
5393 /* Store function address and static chain value. */
5394 mem = adjust_address (m_tramp, Pmode, 16);
5395 emit_move_insn (mem, fnaddr);
5396 mem = adjust_address (m_tramp, Pmode, 24);
5397 emit_move_insn (mem, chain_value);
5398
5399 if (TARGET_ABI_OSF)
5400 {
5401 emit_insn (gen_imb ());
5402 #ifdef ENABLE_EXECUTE_STACK
5403 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5404 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
5405 #endif
5406 }
5407 }
5408 \f
5409 /* Determine where to put an argument to a function.
5410 Value is zero to push the argument on the stack,
5411 or a hard register in which to store the argument.
5412
5413 MODE is the argument's machine mode.
5414 TYPE is the data type of the argument (as a tree).
5415 This is null for libcalls where that information may
5416 not be available.
5417 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5418 the preceding args and about the function being called.
5419 NAMED is nonzero if this argument is a named parameter
5420 (otherwise it is an extra parameter matching an ellipsis).
5421
5422 On Alpha the first 6 words of args are normally in registers
5423 and the rest are pushed. */
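/* For example (illustrative): for f (int a, double b) under the OSF ABI,
   A occupies argument slot 0 and is passed in $16, while B occupies slot
   1 and, being a float, is passed in $f17 (hard register 32 + 16 + 1).  */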
5424
5425 static rtx
5426 alpha_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5427 const_tree type, bool named ATTRIBUTE_UNUSED)
5428 {
5429 int basereg;
5430 int num_args;
5431
5432 /* Don't get confused and pass small structures in FP registers. */
5433 if (type && AGGREGATE_TYPE_P (type))
5434 basereg = 16;
5435 else
5436 {
5437 #ifdef ENABLE_CHECKING
5438 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5439 values here. */
5440 gcc_assert (!COMPLEX_MODE_P (mode));
5441 #endif
5442
5443 /* Set up defaults for FP operands passed in FP registers, and
5444 integral operands passed in integer registers. */
5445 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5446 basereg = 32 + 16;
5447 else
5448 basereg = 16;
5449 }
5450
5451 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5452 the two platforms, so we can't avoid conditional compilation. */
5453 #if TARGET_ABI_OPEN_VMS
5454 {
5455 if (mode == VOIDmode)
5456 return alpha_arg_info_reg_val (*cum);
5457
5458 num_args = cum->num_args;
5459 if (num_args >= 6
5460 || targetm.calls.must_pass_in_stack (mode, type))
5461 return NULL_RTX;
5462 }
5463 #elif TARGET_ABI_OSF
5464 {
5465 if (*cum >= 6)
5466 return NULL_RTX;
5467 num_args = *cum;
5468
5469 /* VOID is passed as a special flag for "last argument". */
5470 if (type == void_type_node)
5471 basereg = 16;
5472 else if (targetm.calls.must_pass_in_stack (mode, type))
5473 return NULL_RTX;
5474 }
5475 #else
5476 #error Unhandled ABI
5477 #endif
5478
5479 return gen_rtx_REG (mode, num_args + basereg);
5480 }
5481
5482 /* Update the data in CUM to advance over an argument
5483 of mode MODE and data type TYPE.
5484 (TYPE is null for libcalls where that information may not be available.) */
5485
5486 static void
5487 alpha_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5488 const_tree type, bool named ATTRIBUTE_UNUSED)
5489 {
5490 bool onstack = targetm.calls.must_pass_in_stack (mode, type);
5491 int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
5492
5493 #if TARGET_ABI_OSF
5494 *cum += increment;
5495 #else
5496 if (!onstack && cum->num_args < 6)
5497 cum->atypes[cum->num_args] = alpha_arg_type (mode);
5498 cum->num_args += increment;
5499 #endif
5500 }
5501
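/* Used for the TARGET_ARG_PARTIAL_BYTES hook: return the number of bytes
   of an argument that are passed in registers when the argument straddles
   the boundary between the six argument registers and the stack.  */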
5502 static int
5503 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5504 enum machine_mode mode ATTRIBUTE_UNUSED,
5505 tree type ATTRIBUTE_UNUSED,
5506 bool named ATTRIBUTE_UNUSED)
5507 {
5508 int words = 0;
5509
5510 #if TARGET_ABI_OPEN_VMS
5511 if (cum->num_args < 6
5512 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5513 words = 6 - cum->num_args;
5514 #elif TARGET_ABI_OSF
5515 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5516 words = 6 - *cum;
5517 #else
5518 #error Unhandled ABI
5519 #endif
5520
5521 return words * UNITS_PER_WORD;
5522 }
5523
5524
5525 /* Return true if TYPE must be returned in memory, instead of in registers. */
5526
5527 static bool
5528 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5529 {
5530 enum machine_mode mode = VOIDmode;
5531 int size;
5532
5533 if (type)
5534 {
5535 mode = TYPE_MODE (type);
5536
5537 /* All aggregates are returned in memory, except on OpenVMS where
5538 records that fit in 64 bits should be returned by immediate value
5539 as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
5540 if (TARGET_ABI_OPEN_VMS
5541 && TREE_CODE (type) != ARRAY_TYPE
5542 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
5543 return false;
5544
5545 if (AGGREGATE_TYPE_P (type))
5546 return true;
5547 }
5548
5549 size = GET_MODE_SIZE (mode);
5550 switch (GET_MODE_CLASS (mode))
5551 {
5552 case MODE_VECTOR_FLOAT:
5553 /* Pass all float vectors in memory, like an aggregate. */
5554 return true;
5555
5556 case MODE_COMPLEX_FLOAT:
5557 /* We judge complex floats on the size of their element,
5558 not the size of the whole type. */
5559 size = GET_MODE_UNIT_SIZE (mode);
5560 break;
5561
5562 case MODE_INT:
5563 case MODE_FLOAT:
5564 case MODE_COMPLEX_INT:
5565 case MODE_VECTOR_INT:
5566 break;
5567
5568 default:
5569 /* ??? We get called on all sorts of random stuff from
5570 aggregate_value_p. We must return something, but it's not
5571 clear what's safe to return. Pretend it's a struct I
5572 guess. */
5573 return true;
5574 }
5575
5576 /* Otherwise types must fit in one register. */
5577 return size > UNITS_PER_WORD;
5578 }
5579
5580 /* Return true if TYPE should be passed by invisible reference. */
5581
5582 static bool
5583 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5584 enum machine_mode mode,
5585 const_tree type ATTRIBUTE_UNUSED,
5586 bool named ATTRIBUTE_UNUSED)
5587 {
5588 return mode == TFmode || mode == TCmode;
5589 }
5590
5591 /* Define how to find the value returned by a function. VALTYPE is the
5592 data type of the value (as a tree). If the precise function being
5593 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5594 MODE is set instead of VALTYPE for libcalls.
5595
5596 On Alpha the value is found in $0 for integer functions and
5597 $f0 for floating-point functions. */
5598
5599 rtx
5600 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5601 enum machine_mode mode)
5602 {
5603 unsigned int regnum, dummy ATTRIBUTE_UNUSED;
5604 enum mode_class mclass;
5605
5606 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5607
5608 if (valtype)
5609 mode = TYPE_MODE (valtype);
5610
5611 mclass = GET_MODE_CLASS (mode);
5612 switch (mclass)
5613 {
5614 case MODE_INT:
5615 /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
5616 where we have them returning both SImode and DImode. */
5617 if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
5618 PROMOTE_MODE (mode, dummy, valtype);
5619 /* FALLTHRU */
5620
5621 case MODE_COMPLEX_INT:
5622 case MODE_VECTOR_INT:
5623 regnum = 0;
5624 break;
5625
5626 case MODE_FLOAT:
5627 regnum = 32;
5628 break;
5629
5630 case MODE_COMPLEX_FLOAT:
5631 {
5632 enum machine_mode cmode = GET_MODE_INNER (mode);
5633
5634 return gen_rtx_PARALLEL
5635 (VOIDmode,
5636 gen_rtvec (2,
5637 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5638 const0_rtx),
5639 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5640 GEN_INT (GET_MODE_SIZE (cmode)))));
5641 }
5642
5643 case MODE_RANDOM:
5644 /* We should only reach here for BLKmode on VMS. */
5645 gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
5646 regnum = 0;
5647 break;
5648
5649 default:
5650 gcc_unreachable ();
5651 }
5652
5653 return gen_rtx_REG (mode, regnum);
5654 }
5655
5656 /* TCmode complex values are passed by invisible reference. We
5657 should not split these values. */
5658
5659 static bool
5660 alpha_split_complex_arg (const_tree type)
5661 {
5662 return TYPE_MODE (type) != TCmode;
5663 }
5664
5665 static tree
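/* Build the va_list type: a plain pointer for VMS, otherwise a record
   corresponding roughly to this C declaration (a sketch; the last field
   is really unnamed and exists only to avoid alignment warnings):

	struct __va_list_tag {
	  void *__base;
	  int __offset;
	  int __pad;
	};
*/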
5666 alpha_build_builtin_va_list (void)
5667 {
5668 tree base, ofs, space, record, type_decl;
5669
5670 if (TARGET_ABI_OPEN_VMS)
5671 return ptr_type_node;
5672
5673 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5674 type_decl = build_decl (BUILTINS_LOCATION,
5675 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5676 TYPE_STUB_DECL (record) = type_decl;
5677 TYPE_NAME (record) = type_decl;
5678
5679 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5680
5681 /* Dummy field to prevent alignment warnings. */
5682 space = build_decl (BUILTINS_LOCATION,
5683 FIELD_DECL, NULL_TREE, integer_type_node);
5684 DECL_FIELD_CONTEXT (space) = record;
5685 DECL_ARTIFICIAL (space) = 1;
5686 DECL_IGNORED_P (space) = 1;
5687
5688 ofs = build_decl (BUILTINS_LOCATION,
5689 FIELD_DECL, get_identifier ("__offset"),
5690 integer_type_node);
5691 DECL_FIELD_CONTEXT (ofs) = record;
5692 DECL_CHAIN (ofs) = space;
5693 /* ??? This is a hack, __offset is marked volatile to prevent
5694 DCE that confuses stdarg optimization and results in
5695 gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
5696 TREE_THIS_VOLATILE (ofs) = 1;
5697
5698 base = build_decl (BUILTINS_LOCATION,
5699 FIELD_DECL, get_identifier ("__base"),
5700 ptr_type_node);
5701 DECL_FIELD_CONTEXT (base) = record;
5702 DECL_CHAIN (base) = ofs;
5703
5704 TYPE_FIELDS (record) = base;
5705 layout_type (record);
5706
5707 va_list_gpr_counter_field = ofs;
5708 return record;
5709 }
5710
5711 #if TARGET_ABI_OSF
5712 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5713 and constant additions. */
5714
5715 static gimple
5716 va_list_skip_additions (tree lhs)
5717 {
5718 gimple stmt;
5719
5720 for (;;)
5721 {
5722 enum tree_code code;
5723
5724 stmt = SSA_NAME_DEF_STMT (lhs);
5725
5726 if (gimple_code (stmt) == GIMPLE_PHI)
5727 return stmt;
5728
5729 if (!is_gimple_assign (stmt)
5730 || gimple_assign_lhs (stmt) != lhs)
5731 return NULL;
5732
5733 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5734 return stmt;
5735 code = gimple_assign_rhs_code (stmt);
5736 if (!CONVERT_EXPR_CODE_P (code)
5737 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5738 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5739 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5740 return stmt;
5741
5742 lhs = gimple_assign_rhs1 (stmt);
5743 }
5744 }
5745
5746 /* Check if LHS = RHS statement is
5747 LHS = *(ap.__base + ap.__offset + cst)
5748 or
5749 LHS = *(ap.__base
5750 + ((ap.__offset + cst <= 47)
5751 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5752 If the former, indicate that GPR registers are needed,
5753 if the latter, indicate that FPR registers are needed.
5754
5755 Also look for LHS = (*ptr).field, where ptr is one of the forms
5756 listed above.
5757
5758 On alpha, cfun->va_list_gpr_size is used as the size of the needed
5759 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5760 registers are needed and bit 1 set if FPR registers are needed.
5761 Return true if va_list references should not be scanned for the
5762 current statement. */
5763
5764 static bool
5765 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5766 {
5767 tree base, offset, rhs;
5768 int offset_arg = 1;
5769 gimple base_stmt;
5770
5771 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5772 != GIMPLE_SINGLE_RHS)
5773 return false;
5774
5775 rhs = gimple_assign_rhs1 (stmt);
5776 while (handled_component_p (rhs))
5777 rhs = TREE_OPERAND (rhs, 0);
5778 if (TREE_CODE (rhs) != MEM_REF
5779 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5780 return false;
5781
5782 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5783 if (stmt == NULL
5784 || !is_gimple_assign (stmt)
5785 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5786 return false;
5787
5788 base = gimple_assign_rhs1 (stmt);
5789 if (TREE_CODE (base) == SSA_NAME)
5790 {
5791 base_stmt = va_list_skip_additions (base);
5792 if (base_stmt
5793 && is_gimple_assign (base_stmt)
5794 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5795 base = gimple_assign_rhs1 (base_stmt);
5796 }
5797
5798 if (TREE_CODE (base) != COMPONENT_REF
5799 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5800 {
5801 base = gimple_assign_rhs2 (stmt);
5802 if (TREE_CODE (base) == SSA_NAME)
5803 {
5804 base_stmt = va_list_skip_additions (base);
5805 if (base_stmt
5806 && is_gimple_assign (base_stmt)
5807 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5808 base = gimple_assign_rhs1 (base_stmt);
5809 }
5810
5811 if (TREE_CODE (base) != COMPONENT_REF
5812 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5813 return false;
5814
5815 offset_arg = 0;
5816 }
5817
5818 base = get_base_address (base);
5819 if (TREE_CODE (base) != VAR_DECL
5820 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5821 return false;
5822
5823 offset = gimple_op (stmt, 1 + offset_arg);
5824 if (TREE_CODE (offset) == SSA_NAME)
5825 {
5826 gimple offset_stmt = va_list_skip_additions (offset);
5827
5828 if (offset_stmt
5829 && gimple_code (offset_stmt) == GIMPLE_PHI)
5830 {
5831 HOST_WIDE_INT sub;
5832 gimple arg1_stmt, arg2_stmt;
5833 tree arg1, arg2;
5834 enum tree_code code1, code2;
5835
5836 if (gimple_phi_num_args (offset_stmt) != 2)
5837 goto escapes;
5838
5839 arg1_stmt
5840 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5841 arg2_stmt
5842 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5843 if (arg1_stmt == NULL
5844 || !is_gimple_assign (arg1_stmt)
5845 || arg2_stmt == NULL
5846 || !is_gimple_assign (arg2_stmt))
5847 goto escapes;
5848
5849 code1 = gimple_assign_rhs_code (arg1_stmt);
5850 code2 = gimple_assign_rhs_code (arg2_stmt);
5851 if (code1 == COMPONENT_REF
5852 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5853 /* Do nothing. */;
5854 else if (code2 == COMPONENT_REF
5855 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5856 {
5857 gimple tem = arg1_stmt;
5858 code2 = code1;
5859 arg1_stmt = arg2_stmt;
5860 arg2_stmt = tem;
5861 }
5862 else
5863 goto escapes;
5864
5865 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5866 goto escapes;
5867
5868 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5869 if (code2 == MINUS_EXPR)
5870 sub = -sub;
5871 if (sub < -48 || sub > -32)
5872 goto escapes;
5873
5874 arg1 = gimple_assign_rhs1 (arg1_stmt);
5875 arg2 = gimple_assign_rhs1 (arg2_stmt);
5876 if (TREE_CODE (arg2) == SSA_NAME)
5877 {
5878 arg2_stmt = va_list_skip_additions (arg2);
5879 if (arg2_stmt == NULL
5880 || !is_gimple_assign (arg2_stmt)
5881 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5882 goto escapes;
5883 arg2 = gimple_assign_rhs1 (arg2_stmt);
5884 }
5885 if (arg1 != arg2)
5886 goto escapes;
5887
5888 if (TREE_CODE (arg1) != COMPONENT_REF
5889 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5890 || get_base_address (arg1) != base)
5891 goto escapes;
5892
5893 /* Need floating point regs. */
5894 cfun->va_list_fpr_size |= 2;
5895 return false;
5896 }
5897 if (offset_stmt
5898 && is_gimple_assign (offset_stmt)
5899 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
5900 offset = gimple_assign_rhs1 (offset_stmt);
5901 }
5902 if (TREE_CODE (offset) != COMPONENT_REF
5903 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5904 || get_base_address (offset) != base)
5905 goto escapes;
5906 else
5907 /* Need general regs. */
5908 cfun->va_list_fpr_size |= 1;
5909 return false;
5910
5911 escapes:
5912 si->va_list_escapes = true;
5913 return false;
5914 }
5915 #endif
5916
5917 /* Perform any actions needed for a function that is receiving a
5918 variable number of arguments. */
5919
5920 static void
5921 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5922 tree type, int *pretend_size, int no_rtl)
5923 {
5924 CUMULATIVE_ARGS cum = *pcum;
5925
5926 /* Skip the current argument. */
5927 targetm.calls.function_arg_advance (&cum, mode, type, true);
5928
5929 #if TARGET_ABI_OPEN_VMS
5930 /* For VMS, we allocate space for all 6 arg registers plus a count.
5931
5932 However, if NO registers need to be saved, don't allocate any space.
5933 This is not only because we won't need the space, but because AP
5934 includes the current_pretend_args_size and we don't want to mess up
5935 any ap-relative addresses already made. */
5936 if (cum.num_args < 6)
5937 {
5938 if (!no_rtl)
5939 {
5940 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
5941 emit_insn (gen_arg_home ());
5942 }
5943 *pretend_size = 7 * UNITS_PER_WORD;
5944 }
5945 #else
5946 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
5947 only push those that are remaining. However, if NO registers need to
5948 be saved, don't allocate any space. This is not only because we won't
5949 need the space, but because AP includes the current_pretend_args_size
5950 and we don't want to mess up any ap-relative addresses already made.
5951
5952 If we are not to use the floating-point registers, save the integer
5953 registers where we would put the floating-point registers. This is
5954 not the most efficient way to implement varargs with just one register
5955 class, but it isn't worth doing anything more efficient in this rare
5956 case. */
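/* Illustrative example (values assumed here, not taken from the source):
   for an OSF function whose first two argument slots are named (cum == 2)
   and whose va_arg uses need both register classes with a full save area
   (count == 4), the code below stores $18-$21 at AP+64..AP+95 and
   $f18-$f21 at AP+16..AP+47, where AP is virtual_incoming_args_rtx,
   and sets *pretend_size to 12 * 8 == 96.  */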
5957 if (cum >= 6)
5958 return;
5959
5960 if (!no_rtl)
5961 {
5962 int count;
5963 alias_set_type set = get_varargs_alias_set ();
5964 rtx tmp;
5965
5966 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
5967 if (count > 6 - cum)
5968 count = 6 - cum;
5969
5970 /* Detect whether integer registers or floating-point registers
5971 are needed by the detected va_arg statements. See above for
5972 how these values are computed. Note that the "escape" value
5973 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
5974 these bits set. */
5975 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
5976
5977 if (cfun->va_list_fpr_size & 1)
5978 {
5979 tmp = gen_rtx_MEM (BLKmode,
5980 plus_constant (virtual_incoming_args_rtx,
5981 (cum + 6) * UNITS_PER_WORD));
5982 MEM_NOTRAP_P (tmp) = 1;
5983 set_mem_alias_set (tmp, set);
5984 move_block_from_reg (16 + cum, tmp, count);
5985 }
5986
5987 if (cfun->va_list_fpr_size & 2)
5988 {
5989 tmp = gen_rtx_MEM (BLKmode,
5990 plus_constant (virtual_incoming_args_rtx,
5991 cum * UNITS_PER_WORD));
5992 MEM_NOTRAP_P (tmp) = 1;
5993 set_mem_alias_set (tmp, set);
5994 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
5995 }
5996 }
5997 *pretend_size = 12 * UNITS_PER_WORD;
5998 #endif
5999 }
6000
6001 static void
6002 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6003 {
6004 HOST_WIDE_INT offset;
6005 tree t, offset_field, base_field;
6006
6007 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6008 return;
6009
6010 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6011 up by 48, storing fp arg registers in the first 48 bytes, and the
6012 integer arg registers in the next 48 bytes. This is only done,
6013 however, if any integer registers need to be stored.
6014
6015 If no integer registers need be stored, then we must subtract 48
6016 in order to account for the integer arg registers which are counted
6017 in argsize above, but which are not actually stored on the stack.
6018 Must further be careful here about structures straddling the last
6019 integer argument register; that futzes with pretend_args_size,
6020 which changes the meaning of AP. */
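/* Illustrative sketch, continuing the example above (assumed values):
   with two named args on OSF, NUM_ARGS == 2 < 6, so offset == 6*8 == 48;
   va_start then sets base = AP + 48 and offset = 2*8 == 16.  A later
   va_arg of an integer reads base + offset == AP + 64 (the saved $18),
   while a double, having offset 16 < 48, reads base + (offset - 48)
   == AP + 16 (the saved $f18).  */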
6021
6022 if (NUM_ARGS < 6)
6023 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6024 else
6025 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6026
6027 if (TARGET_ABI_OPEN_VMS)
6028 {
6029 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6030 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6031 size_int (offset + NUM_ARGS * UNITS_PER_WORD));
6032 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
6033 TREE_SIDE_EFFECTS (t) = 1;
6034 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6035 }
6036 else
6037 {
6038 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6039 offset_field = DECL_CHAIN (base_field);
6040
6041 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6042 valist, base_field, NULL_TREE);
6043 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6044 valist, offset_field, NULL_TREE);
6045
6046 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6047 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6048 size_int (offset));
6049 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6050 TREE_SIDE_EFFECTS (t) = 1;
6051 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6052
6053 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6054 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6055 TREE_SIDE_EFFECTS (t) = 1;
6056 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6057 }
6058 }
6059
6060 static tree
6061 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6062 gimple_seq *pre_p)
6063 {
6064 tree type_size, ptr_type, addend, t, addr;
6065 gimple_seq internal_post;
6066
6067 /* If the type could not be passed in registers, skip the block
6068 reserved for the registers. */
6069 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6070 {
6071 t = build_int_cst (TREE_TYPE (offset), 6*8);
6072 gimplify_assign (offset,
6073 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6074 pre_p);
6075 }
6076
6077 addend = offset;
6078 ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
6079
6080 if (TREE_CODE (type) == COMPLEX_TYPE)
6081 {
6082 tree real_part, imag_part, real_temp;
6083
6084 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6085 offset, pre_p);
6086
6087 /* Copy the value into a new temporary, lest the formal temporary
6088 be reused out from under us. */
6089 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6090
6091 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6092 offset, pre_p);
6093
6094 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6095 }
6096 else if (TREE_CODE (type) == REAL_TYPE)
6097 {
6098 tree fpaddend, cond, fourtyeight;
6099
6100 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6101 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6102 addend, fourtyeight);
6103 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6104 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6105 fpaddend, addend);
6106 }
6107
6108 /* Build the final address and force that value into a temporary. */
6109 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6110 fold_convert (sizetype, addend));
6111 internal_post = NULL;
6112 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6113 gimple_seq_add_seq (pre_p, internal_post);
6114
6115 /* Update the offset field. */
6116 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6117 if (type_size == NULL || TREE_OVERFLOW (type_size))
6118 t = size_zero_node;
6119 else
6120 {
6121 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6122 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6123 t = size_binop (MULT_EXPR, t, size_int (8));
6124 }
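/* For example (illustrative): a 12-byte aggregate gives
   (12 + 7) / 8 * 8 == 16, so the offset always advances by a
   multiple of 8 bytes.  */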
6125 t = fold_convert (TREE_TYPE (offset), t);
6126 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6127 pre_p);
6128
6129 return build_va_arg_indirect_ref (addr);
6130 }
6131
6132 static tree
6133 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6134 gimple_seq *post_p)
6135 {
6136 tree offset_field, base_field, offset, base, t, r;
6137 bool indirect;
6138
6139 if (TARGET_ABI_OPEN_VMS)
6140 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6141
6142 base_field = TYPE_FIELDS (va_list_type_node);
6143 offset_field = DECL_CHAIN (base_field);
6144 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6145 valist, base_field, NULL_TREE);
6146 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6147 valist, offset_field, NULL_TREE);
6148
6149 /* Pull the fields of the structure out into temporaries. Since we never
6150 modify the base field, we can use a formal temporary. Sign-extend the
6151 offset field so that it's the proper width for pointer arithmetic. */
6152 base = get_formal_tmp_var (base_field, pre_p);
6153
6154 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6155 offset = get_initialized_tmp_var (t, pre_p, NULL);
6156
6157 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6158 if (indirect)
6159 type = build_pointer_type_for_mode (type, ptr_mode, true);
6160
6161 /* Find the value. Note that this will be a stable indirection, or
6162 a composite of stable indirections in the case of complex. */
6163 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6164
6165 /* Stuff the offset temporary back into its field. */
6166 gimplify_assign (unshare_expr (offset_field),
6167 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6168
6169 if (indirect)
6170 r = build_va_arg_indirect_ref (r);
6171
6172 return r;
6173 }
6174 \f
6175 /* Builtins. */
6176
6177 enum alpha_builtin
6178 {
6179 ALPHA_BUILTIN_CMPBGE,
6180 ALPHA_BUILTIN_EXTBL,
6181 ALPHA_BUILTIN_EXTWL,
6182 ALPHA_BUILTIN_EXTLL,
6183 ALPHA_BUILTIN_EXTQL,
6184 ALPHA_BUILTIN_EXTWH,
6185 ALPHA_BUILTIN_EXTLH,
6186 ALPHA_BUILTIN_EXTQH,
6187 ALPHA_BUILTIN_INSBL,
6188 ALPHA_BUILTIN_INSWL,
6189 ALPHA_BUILTIN_INSLL,
6190 ALPHA_BUILTIN_INSQL,
6191 ALPHA_BUILTIN_INSWH,
6192 ALPHA_BUILTIN_INSLH,
6193 ALPHA_BUILTIN_INSQH,
6194 ALPHA_BUILTIN_MSKBL,
6195 ALPHA_BUILTIN_MSKWL,
6196 ALPHA_BUILTIN_MSKLL,
6197 ALPHA_BUILTIN_MSKQL,
6198 ALPHA_BUILTIN_MSKWH,
6199 ALPHA_BUILTIN_MSKLH,
6200 ALPHA_BUILTIN_MSKQH,
6201 ALPHA_BUILTIN_UMULH,
6202 ALPHA_BUILTIN_ZAP,
6203 ALPHA_BUILTIN_ZAPNOT,
6204 ALPHA_BUILTIN_AMASK,
6205 ALPHA_BUILTIN_IMPLVER,
6206 ALPHA_BUILTIN_RPCC,
6207 ALPHA_BUILTIN_THREAD_POINTER,
6208 ALPHA_BUILTIN_SET_THREAD_POINTER,
6209 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6210 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
6211
6212 /* TARGET_MAX */
6213 ALPHA_BUILTIN_MINUB8,
6214 ALPHA_BUILTIN_MINSB8,
6215 ALPHA_BUILTIN_MINUW4,
6216 ALPHA_BUILTIN_MINSW4,
6217 ALPHA_BUILTIN_MAXUB8,
6218 ALPHA_BUILTIN_MAXSB8,
6219 ALPHA_BUILTIN_MAXUW4,
6220 ALPHA_BUILTIN_MAXSW4,
6221 ALPHA_BUILTIN_PERR,
6222 ALPHA_BUILTIN_PKLB,
6223 ALPHA_BUILTIN_PKWB,
6224 ALPHA_BUILTIN_UNPKBL,
6225 ALPHA_BUILTIN_UNPKBW,
6226
6227 /* TARGET_CIX */
6228 ALPHA_BUILTIN_CTTZ,
6229 ALPHA_BUILTIN_CTLZ,
6230 ALPHA_BUILTIN_CTPOP,
6231
6232 ALPHA_BUILTIN_max
6233 };
6234
6235 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6236 CODE_FOR_builtin_cmpbge,
6237 CODE_FOR_extbl,
6238 CODE_FOR_extwl,
6239 CODE_FOR_extll,
6240 CODE_FOR_extql,
6241 CODE_FOR_extwh,
6242 CODE_FOR_extlh,
6243 CODE_FOR_extqh,
6244 CODE_FOR_builtin_insbl,
6245 CODE_FOR_builtin_inswl,
6246 CODE_FOR_builtin_insll,
6247 CODE_FOR_insql,
6248 CODE_FOR_inswh,
6249 CODE_FOR_inslh,
6250 CODE_FOR_insqh,
6251 CODE_FOR_mskbl,
6252 CODE_FOR_mskwl,
6253 CODE_FOR_mskll,
6254 CODE_FOR_mskql,
6255 CODE_FOR_mskwh,
6256 CODE_FOR_msklh,
6257 CODE_FOR_mskqh,
6258 CODE_FOR_umuldi3_highpart,
6259 CODE_FOR_builtin_zap,
6260 CODE_FOR_builtin_zapnot,
6261 CODE_FOR_builtin_amask,
6262 CODE_FOR_builtin_implver,
6263 CODE_FOR_builtin_rpcc,
6264 CODE_FOR_load_tp,
6265 CODE_FOR_set_tp,
6266 CODE_FOR_builtin_establish_vms_condition_handler,
6267 CODE_FOR_builtin_revert_vms_condition_handler,
6268
6269 /* TARGET_MAX */
6270 CODE_FOR_builtin_minub8,
6271 CODE_FOR_builtin_minsb8,
6272 CODE_FOR_builtin_minuw4,
6273 CODE_FOR_builtin_minsw4,
6274 CODE_FOR_builtin_maxub8,
6275 CODE_FOR_builtin_maxsb8,
6276 CODE_FOR_builtin_maxuw4,
6277 CODE_FOR_builtin_maxsw4,
6278 CODE_FOR_builtin_perr,
6279 CODE_FOR_builtin_pklb,
6280 CODE_FOR_builtin_pkwb,
6281 CODE_FOR_builtin_unpkbl,
6282 CODE_FOR_builtin_unpkbw,
6283
6284 /* TARGET_CIX */
6285 CODE_FOR_ctzdi2,
6286 CODE_FOR_clzdi2,
6287 CODE_FOR_popcountdi2
6288 };
6289
6290 struct alpha_builtin_def
6291 {
6292 const char *name;
6293 enum alpha_builtin code;
6294 unsigned int target_mask;
6295 bool is_const;
6296 };
6297
6298 static struct alpha_builtin_def const zero_arg_builtins[] = {
6299 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6300 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6301 };
6302
6303 static struct alpha_builtin_def const one_arg_builtins[] = {
6304 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6305 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6306 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6307 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6308 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6309 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6310 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6311 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6312 };
6313
6314 static struct alpha_builtin_def const two_arg_builtins[] = {
6315 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6316 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6317 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6318 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6319 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6320 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6321 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6322 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6323 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6324 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6325 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6326 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6327 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6328 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6329 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6330 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6331 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6332 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6333 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6334 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6335 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6336 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6337 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6338 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6339 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6340 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6341 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6342 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6343 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6344 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6345 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6346 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6347 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6348 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6349 };
6350
6351 static GTY(()) tree alpha_v8qi_u;
6352 static GTY(()) tree alpha_v8qi_s;
6353 static GTY(()) tree alpha_v4hi_u;
6354 static GTY(()) tree alpha_v4hi_s;
6355
6356 static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
6357
6358 /* Return the alpha builtin for CODE. */
6359
6360 static tree
6361 alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
6362 {
6363 if (code >= ALPHA_BUILTIN_max)
6364 return error_mark_node;
6365 return alpha_builtins[code];
6366 }
6367
6368 /* Helper function of alpha_init_builtins. Add the built-in specified
6369 by NAME, TYPE, CODE, and ECF. */
6370
6371 static void
6372 alpha_builtin_function (const char *name, tree ftype,
6373 enum alpha_builtin code, unsigned ecf)
6374 {
6375 tree decl = add_builtin_function (name, ftype, (int) code,
6376 BUILT_IN_MD, NULL, NULL_TREE);
6377
6378 if (ecf & ECF_CONST)
6379 TREE_READONLY (decl) = 1;
6380 if (ecf & ECF_NOTHROW)
6381 TREE_NOTHROW (decl) = 1;
6382
6383 alpha_builtins [(int) code] = decl;
6384 }
6385
6386 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6387 functions pointed to by P, with function type FTYPE. */
6388
6389 static void
6390 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6391 tree ftype)
6392 {
6393 size_t i;
6394
6395 for (i = 0; i < count; ++i, ++p)
6396 if ((target_flags & p->target_mask) == p->target_mask)
6397 alpha_builtin_function (p->name, ftype, p->code,
6398 (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
6399 }
6400
6401 static void
6402 alpha_init_builtins (void)
6403 {
6404 tree dimode_integer_type_node;
6405 tree ftype;
6406
6407 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6408
6409 /* Fwrite on VMS is non-standard. */
6410 #if TARGET_ABI_OPEN_VMS
6411 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
6412 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
6413 #endif
6414
6415 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6416 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6417 ftype);
6418
6419 ftype = build_function_type_list (dimode_integer_type_node,
6420 dimode_integer_type_node, NULL_TREE);
6421 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6422 ftype);
6423
6424 ftype = build_function_type_list (dimode_integer_type_node,
6425 dimode_integer_type_node,
6426 dimode_integer_type_node, NULL_TREE);
6427 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6428 ftype);
6429
6430 ftype = build_function_type (ptr_type_node, void_list_node);
6431 alpha_builtin_function ("__builtin_thread_pointer", ftype,
6432 ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
6433
6434 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6435 alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
6436 ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
6437
6438 if (TARGET_ABI_OPEN_VMS)
6439 {
6440 ftype = build_function_type_list (ptr_type_node, ptr_type_node,
6441 NULL_TREE);
6442 alpha_builtin_function ("__builtin_establish_vms_condition_handler",
6443 ftype,
6444 ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
6445 0);
6446
6447 ftype = build_function_type_list (ptr_type_node, void_type_node,
6448 NULL_TREE);
6449 alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
6450 ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
6451 }
6452
6453 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6454 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6455 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6456 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6457 }
6458
6459 /* Expand an expression EXP that calls a built-in function,
6460 with result going to TARGET if that's convenient
6461 (and in mode MODE if that's convenient).
6462 SUBTARGET may be used as the target for computing one of EXP's operands.
6463 IGNORE is nonzero if the value is to be ignored. */
6464
6465 static rtx
6466 alpha_expand_builtin (tree exp, rtx target,
6467 rtx subtarget ATTRIBUTE_UNUSED,
6468 enum machine_mode mode ATTRIBUTE_UNUSED,
6469 int ignore ATTRIBUTE_UNUSED)
6470 {
6471 #define MAX_ARGS 2
6472
6473 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6474 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6475 tree arg;
6476 call_expr_arg_iterator iter;
6477 enum insn_code icode;
6478 rtx op[MAX_ARGS], pat;
6479 int arity;
6480 bool nonvoid;
6481
6482 if (fcode >= ALPHA_BUILTIN_max)
6483 internal_error ("bad builtin fcode");
6484 icode = code_for_builtin[fcode];
6485 if (icode == 0)
6486 internal_error ("bad builtin fcode");
6487
6488 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6489
6490 arity = 0;
6491 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6492 {
6493 const struct insn_operand_data *insn_op;
6494
6495 if (arg == error_mark_node)
6496 return NULL_RTX;
6497 if (arity >= MAX_ARGS)
6498 return NULL_RTX;
6499
6500 insn_op = &insn_data[icode].operand[arity + nonvoid];
6501
6502 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6503
6504 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6505 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6506 arity++;
6507 }
6508
6509 if (nonvoid)
6510 {
6511 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6512 if (!target
6513 || GET_MODE (target) != tmode
6514 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6515 target = gen_reg_rtx (tmode);
6516 }
6517
6518 switch (arity)
6519 {
6520 case 0:
6521 pat = GEN_FCN (icode) (target);
6522 break;
6523 case 1:
6524 if (nonvoid)
6525 pat = GEN_FCN (icode) (target, op[0]);
6526 else
6527 pat = GEN_FCN (icode) (op[0]);
6528 break;
6529 case 2:
6530 pat = GEN_FCN (icode) (target, op[0], op[1]);
6531 break;
6532 default:
6533 gcc_unreachable ();
6534 }
6535 if (!pat)
6536 return NULL_RTX;
6537 emit_insn (pat);
6538
6539 if (nonvoid)
6540 return target;
6541 else
6542 return const0_rtx;
6543 }
6544
6545
6546 /* Several bits below assume HWI >= 64 bits. This should be enforced
6547 by config.gcc. */
6548 #if HOST_BITS_PER_WIDE_INT < 64
6549 # error "HOST_WIDE_INT too small"
6550 #endif
6551
6552 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6553 with an 8-bit output vector. OPINT contains the integer operands; bit N
6554 of OP_CONST is set if OPINT[N] is valid. */
6555
6556 static tree
6557 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6558 {
6559 if (op_const == 3)
6560 {
6561 int i, val;
6562 for (i = 0, val = 0; i < 8; ++i)
6563 {
6564 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6565 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6566 if (c0 >= c1)
6567 val |= 1 << i;
6568 }
6569 return build_int_cst (long_integer_type_node, val);
6570 }
6571 else if (op_const == 2 && opint[1] == 0)
6572 return build_int_cst (long_integer_type_node, 0xff);
6573 return NULL;
6574 }
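/* As an illustrative check of the folding above (example values chosen
   here, not from the source): __builtin_alpha_cmpbge (0x0102030405060708L,
   0x0707070707070707L) compares byte lanes 0..7, namely 0x08,0x07,...,0x01,
   against 0x07 and should fold to 0x03, since only lanes 0 and 1 compare
   greater or equal.  Comparing against a constant 0 second operand folds
   to 0xff, as every unsigned byte is >= 0.  */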
6575
6576 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6577 specialized form of an AND operation. Other byte manipulation instructions
6578 are defined in terms of this instruction, so this is also used as a
6579 subroutine for other builtins.
6580
6581 OP contains the tree operands; OPINT contains the extracted integer values.
6582 Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only
6583 OPINT may be considered. */
6584
6585 static tree
6586 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6587 long op_const)
6588 {
6589 if (op_const & 2)
6590 {
6591 unsigned HOST_WIDE_INT mask = 0;
6592 int i;
6593
6594 for (i = 0; i < 8; ++i)
6595 if ((opint[1] >> i) & 1)
6596 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6597
6598 if (op_const & 1)
6599 return build_int_cst (long_integer_type_node, opint[0] & mask);
6600
6601 if (op)
6602 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6603 build_int_cst (long_integer_type_node, mask));
6604 }
6605 else if ((op_const & 1) && opint[0] == 0)
6606 return build_int_cst (long_integer_type_node, 0);
6607 return NULL;
6608 }
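/* Illustrative example of the folding above (values assumed):
   __builtin_alpha_zapnot (0x1122334455667788L, 0x0f) keeps byte
   lanes 0-3 and should fold to 0x0000000055667788L; ZAP is handled
   by complementing the mask before calling this routine.  */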
6609
6610 /* Fold the builtins for the EXT family of instructions. */
6611
6612 static tree
6613 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6614 long op_const, unsigned HOST_WIDE_INT bytemask,
6615 bool is_high)
6616 {
6617 long zap_const = 2;
6618 tree *zap_op = NULL;
6619
6620 if (op_const & 2)
6621 {
6622 unsigned HOST_WIDE_INT loc;
6623
6624 loc = opint[1] & 7;
6625 loc *= BITS_PER_UNIT;
6626
6627 if (loc != 0)
6628 {
6629 if (op_const & 1)
6630 {
6631 unsigned HOST_WIDE_INT temp = opint[0];
6632 if (is_high)
6633 temp <<= loc;
6634 else
6635 temp >>= loc;
6636 opint[0] = temp;
6637 zap_const = 3;
6638 }
6639 }
6640 else
6641 zap_op = op;
6642 }
6643
6644 opint[1] = bytemask;
6645 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6646 }
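/* Illustrative example (assumed values): for the low-part extracts,
   __builtin_alpha_extwl (0x1122334455667788L, 2) shifts right by
   2*8 bits and keeps the low two byte lanes, so it should fold to
   0x5566; the *H variants shift left instead.  */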
6647
6648 /* Fold the builtins for the INS family of instructions. */
6649
6650 static tree
6651 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6652 long op_const, unsigned HOST_WIDE_INT bytemask,
6653 bool is_high)
6654 {
6655 if ((op_const & 1) && opint[0] == 0)
6656 return build_int_cst (long_integer_type_node, 0);
6657
6658 if (op_const & 2)
6659 {
6660 unsigned HOST_WIDE_INT temp, loc, byteloc;
6661 tree *zap_op = NULL;
6662
6663 loc = opint[1] & 7;
6664 bytemask <<= loc;
6665
6666 temp = opint[0];
6667 if (is_high)
6668 {
6669 byteloc = (64 - (loc * 8)) & 0x3f;
6670 if (byteloc == 0)
6671 zap_op = op;
6672 else
6673 temp >>= byteloc;
6674 bytemask >>= 8;
6675 }
6676 else
6677 {
6678 byteloc = loc * 8;
6679 if (byteloc == 0)
6680 zap_op = op;
6681 else
6682 temp <<= byteloc;
6683 }
6684
6685 opint[0] = temp;
6686 opint[1] = bytemask;
6687 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6688 }
6689
6690 return NULL;
6691 }
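/* Illustrative example (assumed values): __builtin_alpha_inswl (0x1234, 2)
   places the low word at byte offset 2, so with both operands constant it
   should fold to 0x12340000 (the value is shifted left by 16 and masked
   with byte lanes 2-3).  */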
6692
6693 static tree
6694 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6695 long op_const, unsigned HOST_WIDE_INT bytemask,
6696 bool is_high)
6697 {
6698 if (op_const & 2)
6699 {
6700 unsigned HOST_WIDE_INT loc;
6701
6702 loc = opint[1] & 7;
6703 bytemask <<= loc;
6704
6705 if (is_high)
6706 bytemask >>= 8;
6707
6708 opint[1] = bytemask ^ 0xff;
6709 }
6710
6711 return alpha_fold_builtin_zapnot (op, opint, op_const);
6712 }
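/* Illustrative example (assumed values): __builtin_alpha_mskbl (x, 0)
   clears byte lane 0, so for a constant x of 0x1122334455667788L it
   should fold to 0x1122334455667700L, since the ZAPNOT mask becomes
   0x01 ^ 0xff == 0xfe.  */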
6713
6714 static tree
6715 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6716 {
6717 switch (op_const)
6718 {
6719 case 3:
6720 {
6721 unsigned HOST_WIDE_INT l;
6722 HOST_WIDE_INT h;
6723
6724 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6725
6726 #if HOST_BITS_PER_WIDE_INT > 64
6727 # error fixme
6728 #endif
6729
6730 return build_int_cst (long_integer_type_node, h);
6731 }
6732
6733 case 1:
6734 opint[1] = opint[0];
6735 /* FALLTHRU */
6736 case 2:
6737 /* Note that (X*1) >> 64 == 0. */
6738 if (opint[1] == 0 || opint[1] == 1)
6739 return build_int_cst (long_integer_type_node, 0);
6740 break;
6741 }
6742 return NULL;
6743 }
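/* Illustrative example (assumed values): __builtin_alpha_umulh (1UL << 63, 4)
   is the high 64 bits of 2^65 and should fold to 2; multiplying by 0 or 1
   folds to 0 via the shortcut above even when the other operand is not
   constant.  */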
6744
6745 static tree
6746 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6747 {
6748 tree op0 = fold_convert (vtype, op[0]);
6749 tree op1 = fold_convert (vtype, op[1]);
6750 tree val = fold_build2 (code, vtype, op0, op1);
6751 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6752 }
6753
6754 static tree
6755 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6756 {
6757 unsigned HOST_WIDE_INT temp = 0;
6758 int i;
6759
6760 if (op_const != 3)
6761 return NULL;
6762
6763 for (i = 0; i < 8; ++i)
6764 {
6765 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6766 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6767 if (a >= b)
6768 temp += a - b;
6769 else
6770 temp += b - a;
6771 }
6772
6773 return build_int_cst (long_integer_type_node, temp);
6774 }
6775
6776 static tree
6777 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6778 {
6779 unsigned HOST_WIDE_INT temp;
6780
6781 if (op_const == 0)
6782 return NULL;
6783
6784 temp = opint[0] & 0xff;
6785 temp |= (opint[0] >> 24) & 0xff00;
6786
6787 return build_int_cst (long_integer_type_node, temp);
6788 }
6789
6790 static tree
6791 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6792 {
6793 unsigned HOST_WIDE_INT temp;
6794
6795 if (op_const == 0)
6796 return NULL;
6797
6798 temp = opint[0] & 0xff;
6799 temp |= (opint[0] >> 8) & 0xff00;
6800 temp |= (opint[0] >> 16) & 0xff0000;
6801 temp |= (opint[0] >> 24) & 0xff000000;
6802
6803 return build_int_cst (long_integer_type_node, temp);
6804 }
6805
6806 static tree
6807 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6808 {
6809 unsigned HOST_WIDE_INT temp;
6810
6811 if (op_const == 0)
6812 return NULL;
6813
6814 temp = opint[0] & 0xff;
6815 temp |= (opint[0] & 0xff00) << 24;
6816
6817 return build_int_cst (long_integer_type_node, temp);
6818 }
6819
6820 static tree
6821 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6822 {
6823 unsigned HOST_WIDE_INT temp;
6824
6825 if (op_const == 0)
6826 return NULL;
6827
6828 temp = opint[0] & 0xff;
6829 temp |= (opint[0] & 0x0000ff00) << 8;
6830 temp |= (opint[0] & 0x00ff0000) << 16;
6831 temp |= (opint[0] & 0xff000000) << 24;
6832
6833 return build_int_cst (long_integer_type_node, temp);
6834 }
6835
6836 static tree
6837 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6838 {
6839 unsigned HOST_WIDE_INT temp;
6840
6841 if (op_const == 0)
6842 return NULL;
6843
6844 if (opint[0] == 0)
6845 temp = 64;
6846 else
6847 temp = exact_log2 (opint[0] & -opint[0]);
6848
6849 return build_int_cst (long_integer_type_node, temp);
6850 }
6851
6852 static tree
6853 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6854 {
6855 unsigned HOST_WIDE_INT temp;
6856
6857 if (op_const == 0)
6858 return NULL;
6859
6860 if (opint[0] == 0)
6861 temp = 64;
6862 else
6863 temp = 64 - floor_log2 (opint[0]) - 1;
6864
6865 return build_int_cst (long_integer_type_node, temp);
6866 }
6867
6868 static tree
6869 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6870 {
6871 unsigned HOST_WIDE_INT temp, op;
6872
6873 if (op_const == 0)
6874 return NULL;
6875
6876 op = opint[0];
6877 temp = 0;
6878 while (op)
6879 temp++, op &= op - 1;
6880
6881 return build_int_cst (long_integer_type_node, temp);
6882 }
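/* Illustrative spot checks for the three folders above (assumed values):
   cttz (0x40) folds to 6, ctlz (1) folds to 63, ctpop (0xff) folds to 8,
   and both cttz (0) and ctlz (0) fold to 64.  */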
6883
6884 /* Fold one of our builtin functions. */
6885
6886 static tree
6887 alpha_fold_builtin (tree fndecl, int n_args, tree *op,
6888 bool ignore ATTRIBUTE_UNUSED)
6889 {
6890 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6891 long op_const = 0;
6892 int i;
6893
6894 if (n_args > MAX_ARGS)
6895 return NULL;
6896
6897 for (i = 0; i < n_args; i++)
6898 {
6899 tree arg = op[i];
6900 if (arg == error_mark_node)
6901 return NULL;
6902
6903 opint[i] = 0;
6904 if (TREE_CODE (arg) == INTEGER_CST)
6905 {
6906 op_const |= 1L << i;
6907 opint[i] = int_cst_value (arg);
6908 }
6909 }
6910
6911 switch (DECL_FUNCTION_CODE (fndecl))
6912 {
6913 case ALPHA_BUILTIN_CMPBGE:
6914 return alpha_fold_builtin_cmpbge (opint, op_const);
6915
6916 case ALPHA_BUILTIN_EXTBL:
6917 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6918 case ALPHA_BUILTIN_EXTWL:
6919 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6920 case ALPHA_BUILTIN_EXTLL:
6921 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6922 case ALPHA_BUILTIN_EXTQL:
6923 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6924 case ALPHA_BUILTIN_EXTWH:
6925 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6926 case ALPHA_BUILTIN_EXTLH:
6927 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6928 case ALPHA_BUILTIN_EXTQH:
6929 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6930
6931 case ALPHA_BUILTIN_INSBL:
6932 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6933 case ALPHA_BUILTIN_INSWL:
6934 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6935 case ALPHA_BUILTIN_INSLL:
6936 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6937 case ALPHA_BUILTIN_INSQL:
6938 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6939 case ALPHA_BUILTIN_INSWH:
6940 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6941 case ALPHA_BUILTIN_INSLH:
6942 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6943 case ALPHA_BUILTIN_INSQH:
6944 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6945
6946 case ALPHA_BUILTIN_MSKBL:
6947 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6948 case ALPHA_BUILTIN_MSKWL:
6949 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6950 case ALPHA_BUILTIN_MSKLL:
6951 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6952 case ALPHA_BUILTIN_MSKQL:
6953 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6954 case ALPHA_BUILTIN_MSKWH:
6955 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6956 case ALPHA_BUILTIN_MSKLH:
6957 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6958 case ALPHA_BUILTIN_MSKQH:
6959 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6960
6961 case ALPHA_BUILTIN_UMULH:
6962 return alpha_fold_builtin_umulh (opint, op_const);
6963
6964 case ALPHA_BUILTIN_ZAP:
6965 opint[1] ^= 0xff;
6966 /* FALLTHRU */
6967 case ALPHA_BUILTIN_ZAPNOT:
6968 return alpha_fold_builtin_zapnot (op, opint, op_const);
6969
6970 case ALPHA_BUILTIN_MINUB8:
6971 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
6972 case ALPHA_BUILTIN_MINSB8:
6973 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
6974 case ALPHA_BUILTIN_MINUW4:
6975 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
6976 case ALPHA_BUILTIN_MINSW4:
6977 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
6978 case ALPHA_BUILTIN_MAXUB8:
6979 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
6980 case ALPHA_BUILTIN_MAXSB8:
6981 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
6982 case ALPHA_BUILTIN_MAXUW4:
6983 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
6984 case ALPHA_BUILTIN_MAXSW4:
6985 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
6986
6987 case ALPHA_BUILTIN_PERR:
6988 return alpha_fold_builtin_perr (opint, op_const);
6989 case ALPHA_BUILTIN_PKLB:
6990 return alpha_fold_builtin_pklb (opint, op_const);
6991 case ALPHA_BUILTIN_PKWB:
6992 return alpha_fold_builtin_pkwb (opint, op_const);
6993 case ALPHA_BUILTIN_UNPKBL:
6994 return alpha_fold_builtin_unpkbl (opint, op_const);
6995 case ALPHA_BUILTIN_UNPKBW:
6996 return alpha_fold_builtin_unpkbw (opint, op_const);
6997
6998 case ALPHA_BUILTIN_CTTZ:
6999 return alpha_fold_builtin_cttz (opint, op_const);
7000 case ALPHA_BUILTIN_CTLZ:
7001 return alpha_fold_builtin_ctlz (opint, op_const);
7002 case ALPHA_BUILTIN_CTPOP:
7003 return alpha_fold_builtin_ctpop (opint, op_const);
7004
7005 case ALPHA_BUILTIN_AMASK:
7006 case ALPHA_BUILTIN_IMPLVER:
7007 case ALPHA_BUILTIN_RPCC:
7008 case ALPHA_BUILTIN_THREAD_POINTER:
7009 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7010 /* None of these are foldable at compile-time. */
7011 default:
7012 return NULL;
7013 }
7014 }
7015 \f
7016 /* This page contains routines that are used to determine what the function
7017 prologue and epilogue code will do and write them out. */
7018
7019 /* Compute the size of the save area in the stack. */
7020
7021 /* These variables are used for communication between the following functions.
7022 They indicate various things about the current function being compiled
7023 that are used to tell what kind of prologue, epilogue and procedure
7024 descriptor to generate. */
7025
7026 /* Nonzero if we need a stack procedure. */
7027 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7028 static enum alpha_procedure_types alpha_procedure_type;
7029
7030 /* Register number (either FP or SP) that is used to unwind the frame. */
7031 static int vms_unwind_regno;
7032
7033 /* Register number used to save FP. We need not have one for RA since
7034 we don't modify it for register procedures. This is only defined
7035 for register frame procedures. */
7036 static int vms_save_fp_regno;
7037
7038 /* Register number used to reference objects off our PV. */
7039 static int vms_base_regno;
7040
7041 /* Compute register masks for saved registers. */
7042
7043 static void
7044 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7045 {
7046 unsigned long imask = 0;
7047 unsigned long fmask = 0;
7048 unsigned int i;
7049
7050 /* When outputting a thunk, we don't have valid register life info,
7051 but assemble_start_function wants to output .frame and .mask
7052 directives. */
7053 if (cfun->is_thunk)
7054 {
7055 *imaskP = 0;
7056 *fmaskP = 0;
7057 return;
7058 }
7059
7060 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7061 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7062
7063 /* One for every register we have to save. */
7064 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7065 if (! fixed_regs[i] && ! call_used_regs[i]
7066 && df_regs_ever_live_p (i) && i != REG_RA)
7067 {
7068 if (i < 32)
7069 imask |= (1UL << i);
7070 else
7071 fmask |= (1UL << (i - 32));
7072 }
7073
7074 /* We need to restore these for the handler. */
7075 if (crtl->calls_eh_return)
7076 {
7077 for (i = 0; ; ++i)
7078 {
7079 unsigned regno = EH_RETURN_DATA_REGNO (i);
7080 if (regno == INVALID_REGNUM)
7081 break;
7082 imask |= 1UL << regno;
7083 }
7084 }
7085
7086 /* If any register is spilled, then spill the return address also. */
7087 /* ??? This is required by the Digital stack unwind specification
7088 and isn't needed if we're doing Dwarf2 unwinding. */
7089 if (imask || fmask || alpha_ra_ever_killed ())
7090 imask |= (1UL << REG_RA);
7091
7092 *imaskP = imask;
7093 *fmaskP = fmask;
7094 }
7095
7096 int
7097 alpha_sa_size (void)
7098 {
7099 unsigned long mask[2];
7100 int sa_size = 0;
7101 int i, j;
7102
7103 alpha_sa_mask (&mask[0], &mask[1]);
7104
7105 for (j = 0; j < 2; ++j)
7106 for (i = 0; i < 32; ++i)
7107 if ((mask[j] >> i) & 1)
7108 sa_size++;
7109
7110 if (TARGET_ABI_OPEN_VMS)
7111 {
7112 /* Start with a stack procedure if we make any calls (REG_RA used), or
7113 need a frame pointer, with a register procedure if we otherwise need
7114 at least a slot, and with a null procedure in other cases. */
7115 if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
7116 alpha_procedure_type = PT_STACK;
7117 else if (get_frame_size() != 0)
7118 alpha_procedure_type = PT_REGISTER;
7119 else
7120 alpha_procedure_type = PT_NULL;
7121
7122 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7123 made the final decision on stack procedure vs register procedure. */
7124 if (alpha_procedure_type == PT_STACK)
7125 sa_size -= 2;
7126
7127 /* Decide whether to refer to objects off our PV via FP or PV.
7128 If we need FP for something else or if we receive a nonlocal
7129 goto (which expects PV to contain the value), we must use PV.
7130 Otherwise, start by assuming we can use FP. */
7131
7132 vms_base_regno
7133 = (frame_pointer_needed
7134 || cfun->has_nonlocal_label
7135 || alpha_procedure_type == PT_STACK
7136 || crtl->outgoing_args_size)
7137 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7138
7139 /* If we want to copy PV into FP, we need to find some register
7140 in which to save FP. */
7141
7142 vms_save_fp_regno = -1;
7143 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7144 for (i = 0; i < 32; i++)
7145 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7146 vms_save_fp_regno = i;
7147
7148 /* A VMS condition handler requires a stack procedure in our
7149 implementation (it is not required by the calling standard). */
7150 if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7151 || cfun->machine->uses_condition_handler)
7152 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7153 else if (alpha_procedure_type == PT_NULL)
7154 vms_base_regno = REG_PV;
7155
7156 /* Stack unwinding should be done via FP unless we use it for PV. */
7157 vms_unwind_regno = (vms_base_regno == REG_PV
7158 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7159
7160 /* If this is a stack procedure, allow space for saving FP, RA and
7161 a condition handler slot if needed. */
7162 if (alpha_procedure_type == PT_STACK)
7163 sa_size += 2 + cfun->machine->uses_condition_handler;
7164 }
7165 else
7166 {
7167 /* Our size must be even (multiple of 16 bytes). */
7168 if (sa_size & 1)
7169 sa_size++;
7170 }
7171
7172 return sa_size * 8;
7173 }
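/* Illustrative example (assumed register usage): on a non-VMS target where
   $9, $10 and the return address must be saved, three mask bits are set;
   the count is rounded up to 4 to keep the save area 16-byte aligned, so
   this returns 32 bytes.  */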
7174
7175 /* Define the offset between two registers, one to be eliminated,
7176 and the other its replacement, at the start of a routine. */
7177
7178 HOST_WIDE_INT
7179 alpha_initial_elimination_offset (unsigned int from,
7180 unsigned int to ATTRIBUTE_UNUSED)
7181 {
7182 HOST_WIDE_INT ret;
7183
7184 ret = alpha_sa_size ();
7185 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7186
7187 switch (from)
7188 {
7189 case FRAME_POINTER_REGNUM:
7190 break;
7191
7192 case ARG_POINTER_REGNUM:
7193 ret += (ALPHA_ROUND (get_frame_size ()
7194 + crtl->args.pretend_args_size)
7195 - crtl->args.pretend_args_size);
7196 break;
7197
7198 default:
7199 gcc_unreachable ();
7200 }
7201
7202 return ret;
7203 }
7204
7205 #if TARGET_ABI_OPEN_VMS
7206
7207 /* Worker function for TARGET_CAN_ELIMINATE. */
7208
7209 static bool
7210 alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
7211 {
7212 /* We need the alpha_procedure_type to decide. Evaluate it now. */
7213 alpha_sa_size ();
7214
7215 switch (alpha_procedure_type)
7216 {
7217 case PT_NULL:
7218 /* NULL procedures have no frame of their own and we only
7219 know how to resolve from the current stack pointer. */
7220 return to == STACK_POINTER_REGNUM;
7221
7222 case PT_REGISTER:
7223 case PT_STACK:
7224 /* We always eliminate except to the stack pointer if there is no
7225 usable frame pointer at hand. */
7226 return (to != STACK_POINTER_REGNUM
7227 || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
7228 }
7229
7230 gcc_unreachable ();
7231 }
7232
7233 /* FROM is to be eliminated for TO. Return the offset so that TO+offset
7234 designates the same location as FROM. */
7235
7236 HOST_WIDE_INT
7237 alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
7238 {
7239 /* The only possible attempts we ever expect are ARG or FRAME_PTR to
7240 HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
7241 on the proper computations and will need the register save area size
7242 in most cases. */
7243
7244 HOST_WIDE_INT sa_size = alpha_sa_size ();
7245
7246 /* PT_NULL procedures have no frame of their own and we only allow
7247 elimination to the stack pointer. This is the argument pointer and we
7248 resolve the soft frame pointer to that as well. */
7249
7250 if (alpha_procedure_type == PT_NULL)
7251 return 0;
7252
7253 /* For a PT_STACK procedure the frame layout looks as follows
7254
7255 -----> decreasing addresses
7256
7257               <    size rounded up to 16     |     likewise     >
7258 --------------#------------------------------+++--------------+++-------#
7259 incoming args # pretended args | "frame" | regs sa | PV | outgoing args #
7260 --------------#---------------------------------------------------------#
7261               ^                               ^                ^        ^
7262         ARG_PTR                       FRAME_PTR  HARD_FRAME_PTR STACK_PTR
7263
7264
7265 PT_REGISTER procedures are similar in that they may have a frame of their
7266 own. They have no regs-sa/pv/outgoing-args area.
7267
7268 We first compute offset to HARD_FRAME_PTR, then add what we need to get
7269 to STACK_PTR if need be. */
7270
7271 {
7272 HOST_WIDE_INT offset;
7273 HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
7274
7275 switch (from)
7276 {
7277 case FRAME_POINTER_REGNUM:
7278 offset = ALPHA_ROUND (sa_size + pv_save_size);
7279 break;
7280 case ARG_POINTER_REGNUM:
7281 offset = (ALPHA_ROUND (sa_size + pv_save_size
7282 + get_frame_size ()
7283 + crtl->args.pretend_args_size)
7284 - crtl->args.pretend_args_size);
7285 break;
7286 default:
7287 gcc_unreachable ();
7288 }
7289
7290 if (to == STACK_POINTER_REGNUM)
7291 offset += ALPHA_ROUND (crtl->outgoing_args_size);
7292
7293 return offset;
7294 }
7295 }
7296
7297 #define COMMON_OBJECT "common_object"
7298
7299 static tree
7300 common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
7301 tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
7302 bool *no_add_attrs ATTRIBUTE_UNUSED)
7303 {
7304 tree decl = *node;
7305 gcc_assert (DECL_P (decl));
7306
7307 DECL_COMMON (decl) = 1;
7308 return NULL_TREE;
7309 }
7310
7311 static const struct attribute_spec vms_attribute_table[] =
7312 {
7313 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7314 affects_type_identity } */
7315 { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
7316 { NULL, 0, 0, false, false, false, NULL, false }
7317 };
7318
7319 void
7320 vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
7321 unsigned HOST_WIDE_INT size,
7322 unsigned int align)
7323 {
7324 tree attr = DECL_ATTRIBUTES (decl);
7325 fprintf (file, "%s", COMMON_ASM_OP);
7326 assemble_name (file, name);
7327 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
7328 /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
7329 fprintf (file, ",%u", align / BITS_PER_UNIT);
7330 if (attr)
7331 {
7332 attr = lookup_attribute (COMMON_OBJECT, attr);
7333 if (attr)
7334 fprintf (file, ",%s",
7335 IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
7336 }
7337 fputc ('\n', file);
7338 }
7339
7340 #undef COMMON_OBJECT
7341
7342 #endif
7343
7344 static int
7345 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7346 {
7347 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7348 }
7349
7350 int
7351 alpha_find_lo_sum_using_gp (rtx insn)
7352 {
7353 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7354 }
7355
7356 static int
7357 alpha_does_function_need_gp (void)
7358 {
7359 rtx insn;
7360
7361 /* The GP being variable is an OSF ABI thing. */
7362 if (! TARGET_ABI_OSF)
7363 return 0;
7364
7365 /* We need the gp to load the address of __mcount. */
7366 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7367 return 1;
7368
7369 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7370 if (cfun->is_thunk)
7371 return 1;
7372
7373 /* The nonlocal receiver pattern assumes that the gp is valid for
7374 the nested function. Reasonable because it's almost always set
7375 correctly already. For the cases where that's wrong, make sure
7376 the nested function loads its gp on entry. */
7377 if (crtl->has_nonlocal_goto)
7378 return 1;
7379
7380 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7381 Even if we are a static function, we still need to do this in case
7382 our address is taken and passed to something like qsort. */
7383
7384 push_topmost_sequence ();
7385 insn = get_insns ();
7386 pop_topmost_sequence ();
7387
7388 for (; insn; insn = NEXT_INSN (insn))
7389 if (NONDEBUG_INSN_P (insn)
7390 && ! JUMP_TABLE_DATA_P (insn)
7391 && GET_CODE (PATTERN (insn)) != USE
7392 && GET_CODE (PATTERN (insn)) != CLOBBER
7393 && get_attr_usegp (insn))
7394 return 1;
7395
7396 return 0;
7397 }
7398
7399 \f
7400 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7401 sequences. */
7402
7403 static rtx
7404 set_frame_related_p (void)
7405 {
7406 rtx seq = get_insns ();
7407 rtx insn;
7408
7409 end_sequence ();
7410
7411 if (!seq)
7412 return NULL_RTX;
7413
7414 if (INSN_P (seq))
7415 {
7416 insn = seq;
7417 while (insn != NULL_RTX)
7418 {
7419 RTX_FRAME_RELATED_P (insn) = 1;
7420 insn = NEXT_INSN (insn);
7421 }
7422 seq = emit_insn (seq);
7423 }
7424 else
7425 {
7426 seq = emit_insn (seq);
7427 RTX_FRAME_RELATED_P (seq) = 1;
7428 }
7429 return seq;
7430 }
7431
7432 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7433
7434 /* Generates a store with the proper unwind info attached. VALUE is
7435 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7436 contains SP+FRAME_BIAS, and that is the unwind info that should be
7437 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7438 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7439
7440 static void
7441 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7442 HOST_WIDE_INT base_ofs, rtx frame_reg)
7443 {
7444 rtx addr, mem, insn;
7445
7446 addr = plus_constant (base_reg, base_ofs);
7447 mem = gen_rtx_MEM (DImode, addr);
7448 set_mem_alias_set (mem, alpha_sr_alias_set);
7449
7450 insn = emit_move_insn (mem, value);
7451 RTX_FRAME_RELATED_P (insn) = 1;
7452
7453 if (frame_bias || value != frame_reg)
7454 {
7455 if (frame_bias)
7456 {
7457 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7458 mem = gen_rtx_MEM (DImode, addr);
7459 }
7460
7461 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7462 gen_rtx_SET (VOIDmode, mem, frame_reg));
7463 }
7464 }
7465
7466 static void
7467 emit_frame_store (unsigned int regno, rtx base_reg,
7468 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7469 {
7470 rtx reg = gen_rtx_REG (DImode, regno);
7471 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7472 }
7473
7474 /* Compute the frame size. SIZE is the size of the "naked" frame
7475 and SA_SIZE is the size of the register save area. */
7476
7477 static HOST_WIDE_INT
7478 compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
7479 {
7480 if (TARGET_ABI_OPEN_VMS)
7481 return ALPHA_ROUND (sa_size
7482 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7483 + size
7484 + crtl->args.pretend_args_size);
7485 else
7486 return ALPHA_ROUND (crtl->outgoing_args_size)
7487 + sa_size
7488 + ALPHA_ROUND (size
7489 + crtl->args.pretend_args_size);
7490 }
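/* Illustrative example (assumed values) for the non-VMS case: with 16 bytes
   of outgoing args, a 32-byte save area and a 40-byte "naked" frame, this
   yields ALPHA_ROUND (16) + 32 + ALPHA_ROUND (40) == 16 + 32 + 48 == 96,
   assuming ALPHA_ROUND rounds up to a multiple of 16.  */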
7491
7492 /* Write function prologue. */
7493
7494 /* On VMS we have two kinds of functions:
7495
7496    - stack frame (PROC_STACK)
7497      these are 'normal' functions with local vars and which
7498      call other functions
7499    - register frame (PROC_REGISTER)
7500      keeps all data in registers, needs no stack
7501
7502    We must pass this to the assembler so it can generate the
7503    proper pdsc (procedure descriptor).  This is done with the
7504    '.pdesc' command.
7505
7506    On non-VMS targets, we don't really differentiate between the two,
7507    as we can simply allocate stack without saving registers.  */
7508
7509 void
7510 alpha_expand_prologue (void)
7511 {
7512 /* Registers to save. */
7513 unsigned long imask = 0;
7514 unsigned long fmask = 0;
7515 /* Stack space needed for pushing registers clobbered by us. */
7516 HOST_WIDE_INT sa_size, sa_bias;
7517 /* Complete stack size needed. */
7518 HOST_WIDE_INT frame_size;
7519 /* Probed stack size; it additionally includes the size of
7520 the "reserve region" if any. */
7521 HOST_WIDE_INT probed_size;
7522 /* Offset from base reg to register save area. */
7523 HOST_WIDE_INT reg_offset;
7524 rtx sa_reg;
7525 int i;
7526
7527 sa_size = alpha_sa_size ();
7528 frame_size = compute_frame_size (get_frame_size (), sa_size);
7529
7530 if (flag_stack_usage)
7531 current_function_static_stack_size = frame_size;
7532
7533 if (TARGET_ABI_OPEN_VMS)
7534 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7535 else
7536 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7537
7538 alpha_sa_mask (&imask, &fmask);
7539
7540 /* Emit an insn to reload GP, if needed. */
7541 if (TARGET_ABI_OSF)
7542 {
7543 alpha_function_needs_gp = alpha_does_function_need_gp ();
7544 if (alpha_function_needs_gp)
7545 emit_insn (gen_prologue_ldgp ());
7546 }
7547
7548 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7549 the call to mcount ourselves, rather than having the linker do it
7550 magically in response to -pg. Since _mcount has special linkage,
7551 don't represent the call as a call. */
7552 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7553 emit_insn (gen_prologue_mcount ());
7554
7555 /* Adjust the stack by the frame size. If the frame size is > 4096
7556 bytes, we need to be sure we probe somewhere in the first and last
7557 4096 bytes (we can probably get away without the latter test) and
7558 every 8192 bytes in between. If the frame size is > 32768, we
7559 do this in a loop. Otherwise, we generate the explicit probe
7560 instructions.
7561
7562 Note that we are only allowed to adjust sp once in the prologue. */
7563
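/* Illustrative sketch (assumed numbers): for a 20000-byte frame without
   -fstack-check, probed_size == 20000 <= 32768, so the code below emits
   probes at sp-4096 and sp-12288, an extra probe at sp-20000 if no
   registers are being saved (20000 > 16384), and then a single
   sp adjustment of -20000.  */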
7564 probed_size = frame_size;
7565 if (flag_stack_check)
7566 probed_size += STACK_CHECK_PROTECT;
7567
7568 if (probed_size <= 32768)
7569 {
7570 if (probed_size > 4096)
7571 {
7572 int probed;
7573
7574 for (probed = 4096; probed < probed_size; probed += 8192)
7575 emit_insn (gen_probe_stack (GEN_INT (-probed)));
7576
7577 /* We only have to do this probe if we aren't saving registers or
7578 if we are probing beyond the frame because of -fstack-check. */
7579 if ((sa_size == 0 && probed_size > probed - 4096)
7580 || flag_stack_check)
7581 emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
7582 }
7583
7584 if (frame_size != 0)
7585 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7586 GEN_INT (-frame_size))));
7587 }
7588 else
7589 {
7590 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7591 number of 8192 byte blocks to probe. We then probe each block
7592 in the loop and then set SP to the proper location. If the
7593 amount remaining is > 4096, we have to do one more probe if we
7594 are not saving any registers or if we are probing beyond the
7595 frame because of -fstack-check. */
7596
7597 HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
7598 HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
7599 rtx ptr = gen_rtx_REG (DImode, 22);
7600 rtx count = gen_rtx_REG (DImode, 23);
7601 rtx seq;
7602
7603 emit_move_insn (count, GEN_INT (blocks));
7604 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
7605
7606 /* Because of the difficulty in emitting a new basic block this
7607 late in the compilation, generate the loop as a single insn. */
7608 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7609
7610 if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
7611 {
7612 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7613 MEM_VOLATILE_P (last) = 1;
7614 emit_move_insn (last, const0_rtx);
7615 }
7616
7617 if (flag_stack_check)
7618 {
7619 /* If -fstack-check is specified we have to load the entire
7620 constant into a register and subtract from the sp in one go,
7621 because the probed stack size is not equal to the frame size. */
7622 HOST_WIDE_INT lo, hi;
7623 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7624 hi = frame_size - lo;
7625
7626 emit_move_insn (ptr, GEN_INT (hi));
7627 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7628 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7629 ptr));
7630 }
7631 else
7632 {
7633 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7634 GEN_INT (-leftover)));
7635 }
7636
7637 /* This alternative is special, because the DWARF code cannot
7638 possibly intuit through the loop above. So we invent this
7639 note that it looks at instead. */
7640 RTX_FRAME_RELATED_P (seq) = 1;
7641 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7642 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7643 plus_constant (stack_pointer_rtx,
7644 -frame_size)));
7645 }
7646
7647 /* Cope with very large offsets to the register save area. */
7648 sa_bias = 0;
7649 sa_reg = stack_pointer_rtx;
7650 if (reg_offset + sa_size > 0x8000)
7651 {
7652 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7653 rtx sa_bias_rtx;
7654
7655 if (low + sa_size <= 0x8000)
7656 sa_bias = reg_offset - low, reg_offset = low;
7657 else
7658 sa_bias = reg_offset, reg_offset = 0;
7659
7660 sa_reg = gen_rtx_REG (DImode, 24);
7661 sa_bias_rtx = GEN_INT (sa_bias);
7662
7663 if (add_operand (sa_bias_rtx, DImode))
7664 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7665 else
7666 {
7667 emit_move_insn (sa_reg, sa_bias_rtx);
7668 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7669 }
7670 }
7671
7672 /* Save regs in stack order. Beginning with VMS PV. */
7673 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7674 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7675
7676 /* Save register RA next. */
7677 if (imask & (1UL << REG_RA))
7678 {
7679 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7680 imask &= ~(1UL << REG_RA);
7681 reg_offset += 8;
7682 }
7683
7684 /* Now save any other registers required to be saved. */
7685 for (i = 0; i < 31; i++)
7686 if (imask & (1UL << i))
7687 {
7688 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7689 reg_offset += 8;
7690 }
7691
7692 for (i = 0; i < 31; i++)
7693 if (fmask & (1UL << i))
7694 {
7695 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7696 reg_offset += 8;
7697 }
7698
7699 if (TARGET_ABI_OPEN_VMS)
7700 {
7701 /* Register frame procedures save the fp. */
7702 if (alpha_procedure_type == PT_REGISTER)
7703 {
7704 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7705 hard_frame_pointer_rtx);
7706 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7707 RTX_FRAME_RELATED_P (insn) = 1;
7708 }
7709
7710 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7711 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7712 gen_rtx_REG (DImode, REG_PV)));
7713
7714 if (alpha_procedure_type != PT_NULL
7715 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7716 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7717
7718 /* If we have to allocate space for outgoing args, do it now. */
7719 if (crtl->outgoing_args_size != 0)
7720 {
7721 rtx seq
7722 = emit_move_insn (stack_pointer_rtx,
7723 plus_constant
7724 (hard_frame_pointer_rtx,
7725 - (ALPHA_ROUND
7726 (crtl->outgoing_args_size))));
7727
7728 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7729 if ! frame_pointer_needed. Setting the bit will change the CFA
7730 computation rule to use sp again, which would be wrong if we had
7731 frame_pointer_needed, as this means sp might move unpredictably
7732 later on.
7733
7734 Also, note that
7735 frame_pointer_needed
7736 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7737 and
7738 crtl->outgoing_args_size != 0
7739 => alpha_procedure_type != PT_NULL,
7740
7741 so when we are not setting the bit here, we are guaranteed to
7742 have emitted an FRP frame pointer update just before. */
7743 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7744 }
7745 }
7746 else
7747 {
7748 /* If we need a frame pointer, set it from the stack pointer. */
7749 if (frame_pointer_needed)
7750 {
7751 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7752 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7753 else
7754 /* This must always be the last instruction in the
7755 prologue, thus we emit a special move + clobber. */
7756 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7757 stack_pointer_rtx, sa_reg)));
7758 }
7759 }
7760
7761 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7762 the prologue, for exception handling reasons, we cannot do this for
7763 any insn that might fault. We could prevent this for mems with a
7764 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7765 have to prevent all such scheduling with a blockage.
7766
7767 Linux, on the other hand, never bothered to implement OSF/1's
7768 exception handling, and so doesn't care about such things. Anyone
7769 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7770
7771 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7772 emit_insn (gen_blockage ());
7773 }
7774
7775 /* Count the number of .file directives, so that .loc is up to date. */
7776 int num_source_filenames = 0;
7777
7778 /* Output the textual info surrounding the prologue. */
7779
7780 void
7781 alpha_start_function (FILE *file, const char *fnname,
7782 tree decl ATTRIBUTE_UNUSED)
7783 {
7784 unsigned long imask = 0;
7785 unsigned long fmask = 0;
7786 /* Stack space needed for pushing registers clobbered by us. */
7787 HOST_WIDE_INT sa_size;
7788 /* Complete stack size needed. */
7789 unsigned HOST_WIDE_INT frame_size;
7790 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7791 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7792 ? 524288
7793 : 1UL << 31;
7794 /* Offset from base reg to register save area. */
7795 HOST_WIDE_INT reg_offset;
7796 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7797 char *tramp_label = (char *) alloca (strlen (fnname) + 6);
7798 int i;
7799
7800 #if TARGET_ABI_OPEN_VMS
7801 if (vms_debug_main
7802 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
7803 {
7804 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
7805 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
7806 switch_to_section (text_section);
7807 vms_debug_main = NULL;
7808 }
7809 #endif
7810
7811 alpha_fnname = fnname;
7812 sa_size = alpha_sa_size ();
7813 frame_size = compute_frame_size (get_frame_size (), sa_size);
7814
7815 if (TARGET_ABI_OPEN_VMS)
7816 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
7817 else
7818 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7819
7820 alpha_sa_mask (&imask, &fmask);
7821
7822 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7823 We have to do that before the .ent directive as we cannot switch
7824 files within procedures with native ecoff because line numbers are
7825 linked to procedure descriptors.
7826    Outputting the lineno helps debugging of one-line functions as they
7827 would otherwise get no line number at all. Please note that we would
7828 like to put out last_linenum from final.c, but it is not accessible. */
7829
7830 if (write_symbols == SDB_DEBUG)
7831 {
7832 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7833 ASM_OUTPUT_SOURCE_FILENAME (file,
7834 DECL_SOURCE_FILE (current_function_decl));
7835 #endif
7836 #ifdef SDB_OUTPUT_SOURCE_LINE
7837 if (debug_info_level != DINFO_LEVEL_TERSE)
7838 SDB_OUTPUT_SOURCE_LINE (file,
7839 DECL_SOURCE_LINE (current_function_decl));
7840 #endif
7841 }
7842
7843 /* Issue function start and label. */
7844 if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
7845 {
7846 fputs ("\t.ent ", file);
7847 assemble_name (file, fnname);
7848 putc ('\n', file);
7849
7850 /* If the function needs GP, we'll write the "..ng" label there.
7851 Otherwise, do it here. */
7852 if (TARGET_ABI_OSF
7853 && ! alpha_function_needs_gp
7854 && ! cfun->is_thunk)
7855 {
7856 putc ('$', file);
7857 assemble_name (file, fnname);
7858 fputs ("..ng:\n", file);
7859 }
7860 }
7861 /* Nested functions on VMS that are potentially called via trampoline
7862      get a special transfer entry point that loads the called function's
7863 procedure descriptor and static chain. */
7864 if (TARGET_ABI_OPEN_VMS
7865 && !TREE_PUBLIC (decl)
7866 && DECL_CONTEXT (decl)
7867 && !TYPE_P (DECL_CONTEXT (decl)))
7868 {
7869 strcpy (tramp_label, fnname);
7870 strcat (tramp_label, "..tr");
7871 ASM_OUTPUT_LABEL (file, tramp_label);
7872 fprintf (file, "\tldq $1,24($27)\n");
7873 fprintf (file, "\tldq $27,16($27)\n");
7874 }
7875
7876 strcpy (entry_label, fnname);
7877 if (TARGET_ABI_OPEN_VMS)
7878 strcat (entry_label, "..en");
7879
7880 ASM_OUTPUT_LABEL (file, entry_label);
7881 inside_function = TRUE;
7882
7883 if (TARGET_ABI_OPEN_VMS)
7884 fprintf (file, "\t.base $%d\n", vms_base_regno);
7885
7886 if (TARGET_ABI_OSF
7887 && TARGET_IEEE_CONFORMANT
7888 && !flag_inhibit_size_directive)
7889 {
7890 /* Set flags in procedure descriptor to request IEEE-conformant
7891 math-library routines. The value we set it to is PDSC_EXC_IEEE
7892 (/usr/include/pdsc.h). */
7893 fputs ("\t.eflag 48\n", file);
7894 }
7895
7896 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7897 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7898 alpha_arg_offset = -frame_size + 48;
7899
7900   /* Describe our frame.  If the frame size is too large for the assembler
7901      (see max_frame_size above), print it as zero to avoid an assembler error.
7902      We won't be properly describing such a frame, but that's the best we can do.  */
7903 if (TARGET_ABI_OPEN_VMS)
7904 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7905 HOST_WIDE_INT_PRINT_DEC "\n",
7906 vms_unwind_regno,
7907 frame_size >= (1UL << 31) ? 0 : frame_size,
7908 reg_offset);
7909 else if (!flag_inhibit_size_directive)
7910 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7911 (frame_pointer_needed
7912 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7913 frame_size >= max_frame_size ? 0 : frame_size,
7914 crtl->args.pretend_args_size);
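
  /* The OSF directive above comes out as, for instance,

	.frame $30,96,$26,0

     for a 96-byte frame addressed off the stack pointer with no pretend
     args, or with $15 as the base register when a frame pointer is needed.
     (Illustrative; the exact operands depend on the frame.)  */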
7915
7916 /* Describe which registers were spilled. */
7917 if (TARGET_ABI_OPEN_VMS)
7918 {
7919 if (imask)
7920 /* ??? Does VMS care if mask contains ra? The old code didn't
7921 set it, so I don't here. */
7922 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7923 if (fmask)
7924 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7925 if (alpha_procedure_type == PT_REGISTER)
7926 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7927 }
7928 else if (!flag_inhibit_size_directive)
7929 {
7930 if (imask)
7931 {
7932 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7933 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7934
7935 for (i = 0; i < 32; ++i)
7936 if (imask & (1UL << i))
7937 reg_offset += 8;
7938 }
7939
7940 if (fmask)
7941 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7942 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7943 }
7944
7945 #if TARGET_ABI_OPEN_VMS
7946 /* If a user condition handler has been installed at some point, emit
7947 the procedure descriptor bits to point the Condition Handling Facility
7948 at the indirection wrapper, and state the fp offset at which the user
7949 handler may be found. */
7950 if (cfun->machine->uses_condition_handler)
7951 {
7952 fprintf (file, "\t.handler __gcc_shell_handler\n");
7953 fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
7954 }
7955
7956   /* Ifdef'ed because link_section is only available then.  */
7957 switch_to_section (readonly_data_section);
7958 fprintf (file, "\t.align 3\n");
7959 assemble_name (file, fnname); fputs ("..na:\n", file);
7960 fputs ("\t.ascii \"", file);
7961 assemble_name (file, fnname);
7962 fputs ("\\0\"\n", file);
7963 alpha_need_linkage (fnname, 1);
7964 switch_to_section (text_section);
7965 #endif
7966 }
7967
7968 /* Emit the .prologue note at the scheduled end of the prologue. */
7969
7970 static void
7971 alpha_output_function_end_prologue (FILE *file)
7972 {
7973 if (TARGET_ABI_OPEN_VMS)
7974 fputs ("\t.prologue\n", file);
7975 else if (!flag_inhibit_size_directive)
7976 fprintf (file, "\t.prologue %d\n",
7977 alpha_function_needs_gp || cfun->is_thunk);
7978 }
7979
7980 /* Write function epilogue. */
7981
7982 void
7983 alpha_expand_epilogue (void)
7984 {
7985 /* Registers to save. */
7986 unsigned long imask = 0;
7987 unsigned long fmask = 0;
7988 /* Stack space needed for pushing registers clobbered by us. */
7989 HOST_WIDE_INT sa_size;
7990 /* Complete stack size needed. */
7991 HOST_WIDE_INT frame_size;
7992 /* Offset from base reg to register save area. */
7993 HOST_WIDE_INT reg_offset;
7994 int fp_is_frame_pointer, fp_offset;
7995 rtx sa_reg, sa_reg_exp = NULL;
7996 rtx sp_adj1, sp_adj2, mem, reg, insn;
7997 rtx eh_ofs;
7998 rtx cfa_restores = NULL_RTX;
7999 int i;
8000
8001 sa_size = alpha_sa_size ();
8002 frame_size = compute_frame_size (get_frame_size (), sa_size);
8003
8004 if (TARGET_ABI_OPEN_VMS)
8005 {
8006 if (alpha_procedure_type == PT_STACK)
8007 reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
8008 else
8009 reg_offset = 0;
8010 }
8011 else
8012 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8013
8014 alpha_sa_mask (&imask, &fmask);
8015
8016 fp_is_frame_pointer
8017 = (TARGET_ABI_OPEN_VMS
8018 ? alpha_procedure_type == PT_STACK
8019 : frame_pointer_needed);
8020 fp_offset = 0;
8021 sa_reg = stack_pointer_rtx;
8022
8023 if (crtl->calls_eh_return)
8024 eh_ofs = EH_RETURN_STACKADJ_RTX;
8025 else
8026 eh_ofs = NULL_RTX;
8027
8028 if (sa_size)
8029 {
8030 /* If we have a frame pointer, restore SP from it. */
8031 if (TARGET_ABI_OPEN_VMS
8032 ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
8033 : frame_pointer_needed)
8034 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8035
8036 /* Cope with very large offsets to the register save area. */
8037 if (reg_offset + sa_size > 0x8000)
8038 {
8039 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8040 HOST_WIDE_INT bias;
8041
8042 if (low + sa_size <= 0x8000)
8043 bias = reg_offset - low, reg_offset = low;
8044 else
8045 bias = reg_offset, reg_offset = 0;
8046
8047 sa_reg = gen_rtx_REG (DImode, 22);
8048 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8049
8050 emit_move_insn (sa_reg, sa_reg_exp);
8051 }
8052
8053 /* Restore registers in order, excepting a true frame pointer. */
8054
8055 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8056 if (! eh_ofs)
8057 set_mem_alias_set (mem, alpha_sr_alias_set);
8058 reg = gen_rtx_REG (DImode, REG_RA);
8059 emit_move_insn (reg, mem);
8060 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8061
8062 reg_offset += 8;
8063 imask &= ~(1UL << REG_RA);
8064
8065 for (i = 0; i < 31; ++i)
8066 if (imask & (1UL << i))
8067 {
8068 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8069 fp_offset = reg_offset;
8070 else
8071 {
8072 		mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8073 set_mem_alias_set (mem, alpha_sr_alias_set);
8074 reg = gen_rtx_REG (DImode, i);
8075 emit_move_insn (reg, mem);
8076 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8077 cfa_restores);
8078 }
8079 reg_offset += 8;
8080 }
8081
8082 for (i = 0; i < 31; ++i)
8083 if (fmask & (1UL << i))
8084 {
8085 	  mem = gen_rtx_MEM (DFmode, plus_constant (sa_reg, reg_offset));
8086 set_mem_alias_set (mem, alpha_sr_alias_set);
8087 reg = gen_rtx_REG (DFmode, i+32);
8088 emit_move_insn (reg, mem);
8089 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8090 reg_offset += 8;
8091 }
8092 }
8093
8094 if (frame_size || eh_ofs)
8095 {
8096 sp_adj1 = stack_pointer_rtx;
8097
8098 if (eh_ofs)
8099 {
8100 sp_adj1 = gen_rtx_REG (DImode, 23);
8101 emit_move_insn (sp_adj1,
8102 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8103 }
8104
8105 /* If the stack size is large, begin computation into a temporary
8106 register so as not to interfere with a potential fp restore,
8107 which must be consecutive with an SP restore. */
8108 if (frame_size < 32768 && !cfun->calls_alloca)
8109 sp_adj2 = GEN_INT (frame_size);
8110 else if (frame_size < 0x40007fffL)
8111 {
8112 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8113
8114 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8115 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8116 sp_adj1 = sa_reg;
8117 else
8118 {
8119 sp_adj1 = gen_rtx_REG (DImode, 23);
8120 emit_move_insn (sp_adj1, sp_adj2);
8121 }
8122 sp_adj2 = GEN_INT (low);
8123 }
8124 else
8125 {
8126 rtx tmp = gen_rtx_REG (DImode, 23);
8127 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8128 if (!sp_adj2)
8129 {
8130 /* We can't drop new things to memory this late, afaik,
8131 so build it up by pieces. */
8132 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8133 -(frame_size < 0));
8134 gcc_assert (sp_adj2);
8135 }
8136 }
8137
8138 /* From now on, things must be in order. So emit blockages. */
8139
8140 /* Restore the frame pointer. */
8141 if (fp_is_frame_pointer)
8142 {
8143 emit_insn (gen_blockage ());
8144 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8145 set_mem_alias_set (mem, alpha_sr_alias_set);
8146 emit_move_insn (hard_frame_pointer_rtx, mem);
8147 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8148 hard_frame_pointer_rtx, cfa_restores);
8149 }
8150 else if (TARGET_ABI_OPEN_VMS)
8151 {
8152 emit_insn (gen_blockage ());
8153 emit_move_insn (hard_frame_pointer_rtx,
8154 gen_rtx_REG (DImode, vms_save_fp_regno));
8155 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8156 hard_frame_pointer_rtx, cfa_restores);
8157 }
8158
8159 /* Restore the stack pointer. */
8160 emit_insn (gen_blockage ());
8161 if (sp_adj2 == const0_rtx)
8162 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8163 else
8164 insn = emit_move_insn (stack_pointer_rtx,
8165 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8166 REG_NOTES (insn) = cfa_restores;
8167 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8168 RTX_FRAME_RELATED_P (insn) = 1;
8169 }
8170 else
8171 {
8172 gcc_assert (cfa_restores == NULL);
8173
8174 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8175 {
8176 emit_insn (gen_blockage ());
8177 insn = emit_move_insn (hard_frame_pointer_rtx,
8178 gen_rtx_REG (DImode, vms_save_fp_regno));
8179 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8180 RTX_FRAME_RELATED_P (insn) = 1;
8181 }
8182 }
8183 }
8184 \f
8185 /* Output the rest of the textual info surrounding the epilogue. */
8186
8187 void
8188 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8189 {
8190 rtx insn;
8191
8192 /* We output a nop after noreturn calls at the very end of the function to
8193 ensure that the return address always remains in the caller's code range,
8194 as not doing so might confuse unwinding engines. */
8195 insn = get_last_insn ();
8196 if (!INSN_P (insn))
8197 insn = prev_active_insn (insn);
8198 if (insn && CALL_P (insn))
8199 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8200
8201 #if TARGET_ABI_OPEN_VMS
8202 alpha_write_linkage (file, fnname, decl);
8203 #endif
8204
8205 /* End the function. */
8206 if (!flag_inhibit_size_directive)
8207 {
8208 fputs ("\t.end ", file);
8209 assemble_name (file, fnname);
8210 putc ('\n', file);
8211 }
8212 inside_function = FALSE;
8213 }
8214
8215 #if TARGET_ABI_OPEN_VMS
8216 void avms_asm_output_external (FILE *file, tree decl ATTRIBUTE_UNUSED, const char *name)
8217 {
8218 #ifdef DO_CRTL_NAMES
8219 DO_CRTL_NAMES;
8220 #endif
8221 }
8222 #endif
8223
8224 #if TARGET_ABI_OSF
8225 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8226
8227 In order to avoid the hordes of differences between generated code
8228 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8229 lots of code loading up large constants, generate rtl and emit it
8230 instead of going straight to text.
8231
8232 Not sure why this idea hasn't been explored before... */
8233
8234 static void
8235 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8236 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8237 tree function)
8238 {
8239 HOST_WIDE_INT hi, lo;
8240 rtx this_rtx, insn, funexp;
8241
8242 /* We always require a valid GP. */
8243 emit_insn (gen_prologue_ldgp ());
8244 emit_note (NOTE_INSN_PROLOGUE_END);
8245
8246 /* Find the "this" pointer. If the function returns a structure,
8247 the structure return pointer is in $16. */
8248 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8249 this_rtx = gen_rtx_REG (Pmode, 17);
8250 else
8251 this_rtx = gen_rtx_REG (Pmode, 16);
8252
8253 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8254 entire constant for the add. */
8255 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8256 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8257 if (hi + lo == delta)
8258 {
8259 if (hi)
8260 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8261 if (lo)
8262 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8263 }
8264 else
8265 {
8266 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8267 delta, -(delta < 0));
8268 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8269 }
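
  /* E.g. (illustrative) delta == 0x10008000 splits as lo == -0x8000 and
     hi == 0x10010000, so the ldah/lda pair above suffices; only a delta
     outside the range reachable from a sign-extended 32-bit HI (roughly
     beyond +/- 2^31) falls back to building the full 64-bit constant.  */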
8270
8271 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8272 if (vcall_offset)
8273 {
8274 rtx tmp, tmp2;
8275
8276 tmp = gen_rtx_REG (Pmode, 0);
8277 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8278
8279 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8280 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8281 if (hi + lo == vcall_offset)
8282 {
8283 if (hi)
8284 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8285 }
8286 else
8287 {
8288 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8289 vcall_offset, -(vcall_offset < 0));
8290 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8291 lo = 0;
8292 }
8293 if (lo)
8294 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8295 else
8296 tmp2 = tmp;
8297 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8298
8299 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8300 }
8301
8302 /* Generate a tail call to the target function. */
8303 if (! TREE_USED (function))
8304 {
8305 assemble_external (function);
8306 TREE_USED (function) = 1;
8307 }
8308 funexp = XEXP (DECL_RTL (function), 0);
8309 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8310 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8311 SIBLING_CALL_P (insn) = 1;
8312
8313 /* Run just enough of rest_of_compilation to get the insns emitted.
8314 There's not really enough bulk here to make other passes such as
8315 instruction scheduling worth while. Note that use_thunk calls
8316      instruction scheduling worthwhile.  Note that use_thunk calls
8317 insn = get_insns ();
8318 insn_locators_alloc ();
8319 shorten_branches (insn);
8320 final_start_function (insn, file, 1);
8321 final (insn, file, 1);
8322 final_end_function ();
8323 }
8324 #endif /* TARGET_ABI_OSF */
8325 \f
8326 /* Debugging support. */
8327
8328 #include "gstab.h"
8329
8330 /* Count the number of sdb-related labels generated (to find block
8331 start and end boundaries). */
8332
8333 int sdb_label_count = 0;
8334
8335 /* Name of the file containing the current function. */
8336
8337 static const char *current_function_file = "";
8338
8339 /* Offsets to alpha virtual arg/local debugging pointers. */
8340
8341 long alpha_arg_offset;
8342 long alpha_auto_offset;
8343 \f
8344 /* Emit a new filename to a stream. */
8345
8346 void
8347 alpha_output_filename (FILE *stream, const char *name)
8348 {
8349 static int first_time = TRUE;
8350
8351 if (first_time)
8352 {
8353 first_time = FALSE;
8354 ++num_source_filenames;
8355 current_function_file = name;
8356 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8357 output_quoted_string (stream, name);
8358 fprintf (stream, "\n");
8359 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8360 fprintf (stream, "\t#@stabs\n");
8361 }
8362
8363 else if (write_symbols == DBX_DEBUG)
8364 /* dbxout.c will emit an appropriate .stabs directive. */
8365 return;
8366
8367 else if (name != current_function_file
8368 && strcmp (name, current_function_file) != 0)
8369 {
8370 if (inside_function && ! TARGET_GAS)
8371 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8372 else
8373 {
8374 ++num_source_filenames;
8375 current_function_file = name;
8376 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8377 }
8378
8379 output_quoted_string (stream, name);
8380 fprintf (stream, "\n");
8381 }
8382 }
8383 \f
8384 /* Structure to show the current status of registers and memory. */
8385
8386 struct shadow_summary
8387 {
8388 struct {
8389 unsigned int i : 31; /* Mask of int regs */
8390 unsigned int fp : 31; /* Mask of fp regs */
8391 unsigned int mem : 1; /* mem == imem | fpmem */
8392 } used, defd;
8393 };
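
#if 0
/* Illustrative only (kept under #if 0): the masks summarize_insn (below)
   computes for a simple integer add "addq $1,$2,$3" -- registers 1 and 2
   are read, register 3 is written, and memory is untouched.  */
static void
shadow_summary_example (void)
{
  struct shadow_summary sum = { { 0, 0, 0 }, { 0, 0, 0 } };

  sum.used.i = (1 << 1) | (1 << 2);	/* $1 and $2 are operands */
  sum.defd.i = 1 << 3;			/* $3 is the destination */

  /* Floating registers 32..62 land in the fp masks at bit (regno % 32);
     $31 and $f31 always read as zero and are ignored entirely.  */
  (void) sum;
}
#endif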
8394
8395 /* Summarize the effects of expression X on the machine.  Update SUM, a pointer
8396 to the summary structure. SET is nonzero if the insn is setting the
8397 object, otherwise zero. */
8398
8399 static void
8400 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8401 {
8402 const char *format_ptr;
8403 int i, j;
8404
8405 if (x == 0)
8406 return;
8407
8408 switch (GET_CODE (x))
8409 {
8410 /* ??? Note that this case would be incorrect if the Alpha had a
8411 ZERO_EXTRACT in SET_DEST. */
8412 case SET:
8413 summarize_insn (SET_SRC (x), sum, 0);
8414 summarize_insn (SET_DEST (x), sum, 1);
8415 break;
8416
8417 case CLOBBER:
8418 summarize_insn (XEXP (x, 0), sum, 1);
8419 break;
8420
8421 case USE:
8422 summarize_insn (XEXP (x, 0), sum, 0);
8423 break;
8424
8425 case ASM_OPERANDS:
8426 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8427 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8428 break;
8429
8430 case PARALLEL:
8431 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8432 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8433 break;
8434
8435 case SUBREG:
8436 summarize_insn (SUBREG_REG (x), sum, 0);
8437 break;
8438
8439 case REG:
8440 {
8441 int regno = REGNO (x);
8442 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8443
8444 if (regno == 31 || regno == 63)
8445 break;
8446
8447 if (set)
8448 {
8449 if (regno < 32)
8450 sum->defd.i |= mask;
8451 else
8452 sum->defd.fp |= mask;
8453 }
8454 else
8455 {
8456 if (regno < 32)
8457 sum->used.i |= mask;
8458 else
8459 sum->used.fp |= mask;
8460 }
8461 }
8462 break;
8463
8464 case MEM:
8465 if (set)
8466 sum->defd.mem = 1;
8467 else
8468 sum->used.mem = 1;
8469
8470 /* Find the regs used in memory address computation: */
8471 summarize_insn (XEXP (x, 0), sum, 0);
8472 break;
8473
8474 case CONST_INT: case CONST_DOUBLE:
8475 case SYMBOL_REF: case LABEL_REF: case CONST:
8476 case SCRATCH: case ASM_INPUT:
8477 break;
8478
8479 /* Handle common unary and binary ops for efficiency. */
8480 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8481 case MOD: case UDIV: case UMOD: case AND: case IOR:
8482 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8483 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8484 case NE: case EQ: case GE: case GT: case LE:
8485 case LT: case GEU: case GTU: case LEU: case LTU:
8486 summarize_insn (XEXP (x, 0), sum, 0);
8487 summarize_insn (XEXP (x, 1), sum, 0);
8488 break;
8489
8490 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8491 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8492 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8493 case SQRT: case FFS:
8494 summarize_insn (XEXP (x, 0), sum, 0);
8495 break;
8496
8497 default:
8498 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8499 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8500 switch (format_ptr[i])
8501 {
8502 case 'e':
8503 summarize_insn (XEXP (x, i), sum, 0);
8504 break;
8505
8506 case 'E':
8507 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8508 summarize_insn (XVECEXP (x, i, j), sum, 0);
8509 break;
8510
8511 case 'i':
8512 break;
8513
8514 default:
8515 gcc_unreachable ();
8516 }
8517 }
8518 }
8519
8520 /* Ensure a sufficient number of `trapb' insns are in the code when
8521 the user requests code with a trap precision of functions or
8522 instructions.
8523
8524 In naive mode, when the user requests a trap-precision of
8525 "instruction", a trapb is needed after every instruction that may
8526 generate a trap. This ensures that the code is resumption safe but
8527 it is also slow.
8528
8529 When optimizations are turned on, we delay issuing a trapb as long
8530 as possible. In this context, a trap shadow is the sequence of
8531 instructions that starts with a (potentially) trap generating
8532 instruction and extends to the next trapb or call_pal instruction
8533 (but GCC never generates call_pal by itself). We can delay (and
8534 therefore sometimes omit) a trapb subject to the following
8535 conditions:
8536
8537 (a) On entry to the trap shadow, if any Alpha register or memory
8538 location contains a value that is used as an operand value by some
8539 instruction in the trap shadow (live on entry), then no instruction
8540 in the trap shadow may modify the register or memory location.
8541
8542 (b) Within the trap shadow, the computation of the base register
8543 for a memory load or store instruction may not involve using the
8544 result of an instruction that might generate an UNPREDICTABLE
8545 result.
8546
8547 (c) Within the trap shadow, no register may be used more than once
8548 as a destination register. (This is to make life easier for the
8549 trap-handler.)
8550
8551 (d) The trap shadow may not include any branch instructions. */
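
/* For instance (illustrative only), with -mtrap-precision=i and -O, a
   sequence like

	divt $f1,$f2,$f3	# may trap; opens a trap shadow
	addt $f4,$f5,$f1	# would overwrite $f1, which was live on
				# entry to the shadow -- condition (a)

   has a trapb inserted before the addt, closing the shadow:

	divt $f1,$f2,$f3
	trapb
	addt $f4,$f5,$f1  */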
8552
8553 static void
8554 alpha_handle_trap_shadows (void)
8555 {
8556 struct shadow_summary shadow;
8557 int trap_pending, exception_nesting;
8558 rtx i, n;
8559
8560 trap_pending = 0;
8561 exception_nesting = 0;
8562 shadow.used.i = 0;
8563 shadow.used.fp = 0;
8564 shadow.used.mem = 0;
8565 shadow.defd = shadow.used;
8566
8567 for (i = get_insns (); i ; i = NEXT_INSN (i))
8568 {
8569 if (NOTE_P (i))
8570 {
8571 switch (NOTE_KIND (i))
8572 {
8573 case NOTE_INSN_EH_REGION_BEG:
8574 exception_nesting++;
8575 if (trap_pending)
8576 goto close_shadow;
8577 break;
8578
8579 case NOTE_INSN_EH_REGION_END:
8580 exception_nesting--;
8581 if (trap_pending)
8582 goto close_shadow;
8583 break;
8584
8585 case NOTE_INSN_EPILOGUE_BEG:
8586 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8587 goto close_shadow;
8588 break;
8589 }
8590 }
8591 else if (trap_pending)
8592 {
8593 if (alpha_tp == ALPHA_TP_FUNC)
8594 {
8595 if (JUMP_P (i)
8596 && GET_CODE (PATTERN (i)) == RETURN)
8597 goto close_shadow;
8598 }
8599 else if (alpha_tp == ALPHA_TP_INSN)
8600 {
8601 if (optimize > 0)
8602 {
8603 struct shadow_summary sum;
8604
8605 sum.used.i = 0;
8606 sum.used.fp = 0;
8607 sum.used.mem = 0;
8608 sum.defd = sum.used;
8609
8610 switch (GET_CODE (i))
8611 {
8612 case INSN:
8613 /* Annoyingly, get_attr_trap will die on these. */
8614 if (GET_CODE (PATTERN (i)) == USE
8615 || GET_CODE (PATTERN (i)) == CLOBBER)
8616 break;
8617
8618 summarize_insn (PATTERN (i), &sum, 0);
8619
8620 if ((sum.defd.i & shadow.defd.i)
8621 || (sum.defd.fp & shadow.defd.fp))
8622 {
8623 /* (c) would be violated */
8624 goto close_shadow;
8625 }
8626
8627 /* Combine shadow with summary of current insn: */
8628 shadow.used.i |= sum.used.i;
8629 shadow.used.fp |= sum.used.fp;
8630 shadow.used.mem |= sum.used.mem;
8631 shadow.defd.i |= sum.defd.i;
8632 shadow.defd.fp |= sum.defd.fp;
8633 shadow.defd.mem |= sum.defd.mem;
8634
8635 if ((sum.defd.i & shadow.used.i)
8636 || (sum.defd.fp & shadow.used.fp)
8637 || (sum.defd.mem & shadow.used.mem))
8638 {
8639 /* (a) would be violated (also takes care of (b)) */
8640 gcc_assert (get_attr_trap (i) != TRAP_YES
8641 || (!(sum.defd.i & sum.used.i)
8642 && !(sum.defd.fp & sum.used.fp)));
8643
8644 goto close_shadow;
8645 }
8646 break;
8647
8648 case JUMP_INSN:
8649 case CALL_INSN:
8650 case CODE_LABEL:
8651 goto close_shadow;
8652
8653 default:
8654 gcc_unreachable ();
8655 }
8656 }
8657 else
8658 {
8659 close_shadow:
8660 n = emit_insn_before (gen_trapb (), i);
8661 PUT_MODE (n, TImode);
8662 PUT_MODE (i, TImode);
8663 trap_pending = 0;
8664 shadow.used.i = 0;
8665 shadow.used.fp = 0;
8666 shadow.used.mem = 0;
8667 shadow.defd = shadow.used;
8668 }
8669 }
8670 }
8671
8672 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8673 && NONJUMP_INSN_P (i)
8674 && GET_CODE (PATTERN (i)) != USE
8675 && GET_CODE (PATTERN (i)) != CLOBBER
8676 && get_attr_trap (i) == TRAP_YES)
8677 {
8678 if (optimize && !trap_pending)
8679 summarize_insn (PATTERN (i), &shadow, 0);
8680 trap_pending = 1;
8681 }
8682 }
8683 }
8684 \f
8685 /* Alpha can only issue instruction groups simultaneously if they are
8686 suitably aligned. This is very processor-specific. */
8687 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8688 that are marked "fake". These instructions do not exist on that target,
8689 but it is possible to see these insns with deranged combinations of
8690 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8691 choose a result at random. */
8692
8693 enum alphaev4_pipe {
8694 EV4_STOP = 0,
8695 EV4_IB0 = 1,
8696 EV4_IB1 = 2,
8697 EV4_IBX = 4
8698 };
8699
8700 enum alphaev5_pipe {
8701 EV5_STOP = 0,
8702 EV5_NONE = 1,
8703 EV5_E01 = 2,
8704 EV5_E0 = 4,
8705 EV5_E1 = 8,
8706 EV5_FAM = 16,
8707 EV5_FA = 32,
8708 EV5_FM = 64
8709 };
8710
8711 static enum alphaev4_pipe
8712 alphaev4_insn_pipe (rtx insn)
8713 {
8714 if (recog_memoized (insn) < 0)
8715 return EV4_STOP;
8716 if (get_attr_length (insn) != 4)
8717 return EV4_STOP;
8718
8719 switch (get_attr_type (insn))
8720 {
8721 case TYPE_ILD:
8722 case TYPE_LDSYM:
8723 case TYPE_FLD:
8724 case TYPE_LD_L:
8725 return EV4_IBX;
8726
8727 case TYPE_IADD:
8728 case TYPE_ILOG:
8729 case TYPE_ICMOV:
8730 case TYPE_ICMP:
8731 case TYPE_FST:
8732 case TYPE_SHIFT:
8733 case TYPE_IMUL:
8734 case TYPE_FBR:
8735 case TYPE_MVI: /* fake */
8736 return EV4_IB0;
8737
8738 case TYPE_IST:
8739 case TYPE_MISC:
8740 case TYPE_IBR:
8741 case TYPE_JSR:
8742 case TYPE_CALLPAL:
8743 case TYPE_FCPYS:
8744 case TYPE_FCMOV:
8745 case TYPE_FADD:
8746 case TYPE_FDIV:
8747 case TYPE_FMUL:
8748 case TYPE_ST_C:
8749 case TYPE_MB:
8750 case TYPE_FSQRT: /* fake */
8751 case TYPE_FTOI: /* fake */
8752 case TYPE_ITOF: /* fake */
8753 return EV4_IB1;
8754
8755 default:
8756 gcc_unreachable ();
8757 }
8758 }
8759
8760 static enum alphaev5_pipe
8761 alphaev5_insn_pipe (rtx insn)
8762 {
8763 if (recog_memoized (insn) < 0)
8764 return EV5_STOP;
8765 if (get_attr_length (insn) != 4)
8766 return EV5_STOP;
8767
8768 switch (get_attr_type (insn))
8769 {
8770 case TYPE_ILD:
8771 case TYPE_FLD:
8772 case TYPE_LDSYM:
8773 case TYPE_IADD:
8774 case TYPE_ILOG:
8775 case TYPE_ICMOV:
8776 case TYPE_ICMP:
8777 return EV5_E01;
8778
8779 case TYPE_IST:
8780 case TYPE_FST:
8781 case TYPE_SHIFT:
8782 case TYPE_IMUL:
8783 case TYPE_MISC:
8784 case TYPE_MVI:
8785 case TYPE_LD_L:
8786 case TYPE_ST_C:
8787 case TYPE_MB:
8788 case TYPE_FTOI: /* fake */
8789 case TYPE_ITOF: /* fake */
8790 return EV5_E0;
8791
8792 case TYPE_IBR:
8793 case TYPE_JSR:
8794 case TYPE_CALLPAL:
8795 return EV5_E1;
8796
8797 case TYPE_FCPYS:
8798 return EV5_FAM;
8799
8800 case TYPE_FBR:
8801 case TYPE_FCMOV:
8802 case TYPE_FADD:
8803 case TYPE_FDIV:
8804 case TYPE_FSQRT: /* fake */
8805 return EV5_FA;
8806
8807 case TYPE_FMUL:
8808 return EV5_FM;
8809
8810 default:
8811 gcc_unreachable ();
8812 }
8813 }
8814
8815 /* IN_USE is a mask of the slots currently filled within the insn group.
8816 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8817 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8818
8819 LEN is, of course, the length of the group in bytes. */
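
/* For example, an ldq (EV4_IBX) followed by an addq (EV4_IB0): the load is
   recorded as occupying IB0 (plus IBX), and when the addq also wants IB0,
   the IBX bit lets the load be counted against IB1 instead, so both insns
   still fit in one 8-byte group.  (Illustrative.)  */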
8820
8821 static rtx
8822 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8823 {
8824 int len, in_use;
8825
8826 len = in_use = 0;
8827
8828 if (! INSN_P (insn)
8829 || GET_CODE (PATTERN (insn)) == CLOBBER
8830 || GET_CODE (PATTERN (insn)) == USE)
8831 goto next_and_done;
8832
8833 while (1)
8834 {
8835 enum alphaev4_pipe pipe;
8836
8837 pipe = alphaev4_insn_pipe (insn);
8838 switch (pipe)
8839 {
8840 case EV4_STOP:
8841 /* Force complex instructions to start new groups. */
8842 if (in_use)
8843 goto done;
8844
8845 /* If this is a completely unrecognized insn, it's an asm.
8846 We don't know how long it is, so record length as -1 to
8847 signal a needed realignment. */
8848 if (recog_memoized (insn) < 0)
8849 len = -1;
8850 else
8851 len = get_attr_length (insn);
8852 goto next_and_done;
8853
8854 case EV4_IBX:
8855 if (in_use & EV4_IB0)
8856 {
8857 if (in_use & EV4_IB1)
8858 goto done;
8859 in_use |= EV4_IB1;
8860 }
8861 else
8862 in_use |= EV4_IB0 | EV4_IBX;
8863 break;
8864
8865 case EV4_IB0:
8866 if (in_use & EV4_IB0)
8867 {
8868 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8869 goto done;
8870 in_use |= EV4_IB1;
8871 }
8872 in_use |= EV4_IB0;
8873 break;
8874
8875 case EV4_IB1:
8876 if (in_use & EV4_IB1)
8877 goto done;
8878 in_use |= EV4_IB1;
8879 break;
8880
8881 default:
8882 gcc_unreachable ();
8883 }
8884 len += 4;
8885
8886 /* Haifa doesn't do well scheduling branches. */
8887 if (JUMP_P (insn))
8888 goto next_and_done;
8889
8890 next:
8891 insn = next_nonnote_insn (insn);
8892
8893 if (!insn || ! INSN_P (insn))
8894 goto done;
8895
8896 /* Let Haifa tell us where it thinks insn group boundaries are. */
8897 if (GET_MODE (insn) == TImode)
8898 goto done;
8899
8900 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8901 goto next;
8902 }
8903
8904 next_and_done:
8905 insn = next_nonnote_insn (insn);
8906
8907 done:
8908 *plen = len;
8909 *pin_use = in_use;
8910 return insn;
8911 }
8912
8913 /* IN_USE is a mask of the slots currently filled within the insn group.
8914 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8915 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8916
8917 LEN is, of course, the length of the group in bytes. */
8918
8919 static rtx
8920 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8921 {
8922 int len, in_use;
8923
8924 len = in_use = 0;
8925
8926 if (! INSN_P (insn)
8927 || GET_CODE (PATTERN (insn)) == CLOBBER
8928 || GET_CODE (PATTERN (insn)) == USE)
8929 goto next_and_done;
8930
8931 while (1)
8932 {
8933 enum alphaev5_pipe pipe;
8934
8935 pipe = alphaev5_insn_pipe (insn);
8936 switch (pipe)
8937 {
8938 case EV5_STOP:
8939 /* Force complex instructions to start new groups. */
8940 if (in_use)
8941 goto done;
8942
8943 /* If this is a completely unrecognized insn, it's an asm.
8944 We don't know how long it is, so record length as -1 to
8945 signal a needed realignment. */
8946 if (recog_memoized (insn) < 0)
8947 len = -1;
8948 else
8949 len = get_attr_length (insn);
8950 goto next_and_done;
8951
8952 	  /* ??? In most of the places below, we would like to assert that this
8953 	     never happens, as it would indicate an error either in Haifa or
8954 	     in the scheduling description.  Unfortunately, Haifa never
8955 	     schedules the last instruction of the BB, so we don't have
8956 	     an accurate TI bit to go by.  */
8957 case EV5_E01:
8958 if (in_use & EV5_E0)
8959 {
8960 if (in_use & EV5_E1)
8961 goto done;
8962 in_use |= EV5_E1;
8963 }
8964 else
8965 in_use |= EV5_E0 | EV5_E01;
8966 break;
8967
8968 case EV5_E0:
8969 if (in_use & EV5_E0)
8970 {
8971 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
8972 goto done;
8973 in_use |= EV5_E1;
8974 }
8975 in_use |= EV5_E0;
8976 break;
8977
8978 case EV5_E1:
8979 if (in_use & EV5_E1)
8980 goto done;
8981 in_use |= EV5_E1;
8982 break;
8983
8984 case EV5_FAM:
8985 if (in_use & EV5_FA)
8986 {
8987 if (in_use & EV5_FM)
8988 goto done;
8989 in_use |= EV5_FM;
8990 }
8991 else
8992 in_use |= EV5_FA | EV5_FAM;
8993 break;
8994
8995 case EV5_FA:
8996 if (in_use & EV5_FA)
8997 goto done;
8998 in_use |= EV5_FA;
8999 break;
9000
9001 case EV5_FM:
9002 if (in_use & EV5_FM)
9003 goto done;
9004 in_use |= EV5_FM;
9005 break;
9006
9007 case EV5_NONE:
9008 break;
9009
9010 default:
9011 gcc_unreachable ();
9012 }
9013 len += 4;
9014
9015 /* Haifa doesn't do well scheduling branches. */
9016 /* ??? If this is predicted not-taken, slotting continues, except
9017 that no more IBR, FBR, or JSR insns may be slotted. */
9018 if (JUMP_P (insn))
9019 goto next_and_done;
9020
9021 next:
9022 insn = next_nonnote_insn (insn);
9023
9024 if (!insn || ! INSN_P (insn))
9025 goto done;
9026
9027 /* Let Haifa tell us where it thinks insn group boundaries are. */
9028 if (GET_MODE (insn) == TImode)
9029 goto done;
9030
9031 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9032 goto next;
9033 }
9034
9035 next_and_done:
9036 insn = next_nonnote_insn (insn);
9037
9038 done:
9039 *plen = len;
9040 *pin_use = in_use;
9041 return insn;
9042 }
9043
9044 static rtx
9045 alphaev4_next_nop (int *pin_use)
9046 {
9047 int in_use = *pin_use;
9048 rtx nop;
9049
9050 if (!(in_use & EV4_IB0))
9051 {
9052 in_use |= EV4_IB0;
9053 nop = gen_nop ();
9054 }
9055 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9056 {
9057 in_use |= EV4_IB1;
9058 nop = gen_nop ();
9059 }
9060 else if (TARGET_FP && !(in_use & EV4_IB1))
9061 {
9062 in_use |= EV4_IB1;
9063 nop = gen_fnop ();
9064 }
9065 else
9066 nop = gen_unop ();
9067
9068 *pin_use = in_use;
9069 return nop;
9070 }
9071
9072 static rtx
9073 alphaev5_next_nop (int *pin_use)
9074 {
9075 int in_use = *pin_use;
9076 rtx nop;
9077
9078 if (!(in_use & EV5_E1))
9079 {
9080 in_use |= EV5_E1;
9081 nop = gen_nop ();
9082 }
9083 else if (TARGET_FP && !(in_use & EV5_FA))
9084 {
9085 in_use |= EV5_FA;
9086 nop = gen_fnop ();
9087 }
9088 else if (TARGET_FP && !(in_use & EV5_FM))
9089 {
9090 in_use |= EV5_FM;
9091 nop = gen_fnop ();
9092 }
9093 else
9094 nop = gen_unop ();
9095
9096 *pin_use = in_use;
9097 return nop;
9098 }
9099
9100 /* The instruction group alignment main loop. */
9101
9102 static void
9103 alpha_align_insns (unsigned int max_align,
9104 rtx (*next_group) (rtx, int *, int *),
9105 rtx (*next_nop) (int *))
9106 {
9107 /* ALIGN is the known alignment for the insn group. */
9108 unsigned int align;
9109 /* OFS is the offset of the current insn in the insn group. */
9110 int ofs;
9111 int prev_in_use, in_use, len, ldgp;
9112 rtx i, next;
9113
9114   /* Let shorten_branches take care of assigning alignments to code labels.  */
9115 shorten_branches (get_insns ());
9116
9117 if (align_functions < 4)
9118 align = 4;
9119 else if ((unsigned int) align_functions < max_align)
9120 align = align_functions;
9121 else
9122 align = max_align;
9123
9124 ofs = prev_in_use = 0;
9125 i = get_insns ();
9126 if (NOTE_P (i))
9127 i = next_nonnote_insn (i);
9128
9129 ldgp = alpha_function_needs_gp ? 8 : 0;
9130
9131 while (i)
9132 {
9133 next = (*next_group) (i, &in_use, &len);
9134
9135 /* When we see a label, resync alignment etc. */
9136 if (LABEL_P (i))
9137 {
9138 unsigned int new_align = 1 << label_to_alignment (i);
9139
9140 if (new_align >= align)
9141 {
9142 align = new_align < max_align ? new_align : max_align;
9143 ofs = 0;
9144 }
9145
9146 else if (ofs & (new_align-1))
9147 ofs = (ofs | (new_align-1)) + 1;
9148 gcc_assert (!len);
9149 }
9150
9151       /* Handle complex instructions specially.  */
9152 else if (in_use == 0)
9153 {
9154 /* Asms will have length < 0. This is a signal that we have
9155 lost alignment knowledge. Assume, however, that the asm
9156 will not mis-align instructions. */
9157 if (len < 0)
9158 {
9159 ofs = 0;
9160 align = 4;
9161 len = 0;
9162 }
9163 }
9164
9165 /* If the known alignment is smaller than the recognized insn group,
9166 realign the output. */
9167 else if ((int) align < len)
9168 {
9169 unsigned int new_log_align = len > 8 ? 4 : 3;
9170 rtx prev, where;
9171
9172 where = prev = prev_nonnote_insn (i);
9173 if (!where || !LABEL_P (where))
9174 where = i;
9175
9176 /* Can't realign between a call and its gp reload. */
9177 if (! (TARGET_EXPLICIT_RELOCS
9178 && prev && CALL_P (prev)))
9179 {
9180 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9181 align = 1 << new_log_align;
9182 ofs = 0;
9183 }
9184 }
9185
9186 /* We may not insert padding inside the initial ldgp sequence. */
9187 else if (ldgp > 0)
9188 ldgp -= len;
9189
9190       /* If the group won't fit in the same aligned block (INT16) as the previous,
9191 we need to add padding to keep the group together. Rather
9192 than simply leaving the insn filling to the assembler, we
9193 can make use of the knowledge of what sorts of instructions
9194 were issued in the previous group to make sure that all of
9195 the added nops are really free. */
9196 else if (ofs + len > (int) align)
9197 {
9198 int nop_count = (align - ofs) / 4;
9199 rtx where;
9200
9201 /* Insert nops before labels, branches, and calls to truly merge
9202 the execution of the nops with the previous instruction group. */
9203 where = prev_nonnote_insn (i);
9204 if (where)
9205 {
9206 if (LABEL_P (where))
9207 {
9208 rtx where2 = prev_nonnote_insn (where);
9209 if (where2 && JUMP_P (where2))
9210 where = where2;
9211 }
9212 else if (NONJUMP_INSN_P (where))
9213 where = i;
9214 }
9215 else
9216 where = i;
9217
9218 do
9219 emit_insn_before ((*next_nop)(&prev_in_use), where);
9220 while (--nop_count);
9221 ofs = 0;
9222 }
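
      /* A worked example of the padding above: with align == 16 and
	 ofs == 8, a 12-byte group would spill into the next aligned block,
	 so (16 - 8) / 4 == 2 nops are emitted first, chosen by next_nop to
	 fill pipes the previous group left idle; OFS then restarts at 0
	 for the new block.  (Illustrative numbers.)  */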
9223
9224 ofs = (ofs + len) & (align - 1);
9225 prev_in_use = in_use;
9226 i = next;
9227 }
9228 }
9229
9230 /* Insert an unop between a noreturn function call and the following GP load.  */
9231
9232 static void
9233 alpha_pad_noreturn (void)
9234 {
9235 rtx insn, next;
9236
9237 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9238 {
9239 if (! (CALL_P (insn)
9240 && find_reg_note (insn, REG_NORETURN, NULL_RTX)))
9241 continue;
9242
9243 /* Make sure we do not split a call and its corresponding
9244 CALL_ARG_LOCATION note. */
9245 if (CALL_P (insn))
9246 {
9247 next = NEXT_INSN (insn);
9248 if (next && NOTE_P (next)
9249 && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
9250 insn = next;
9251 }
9252
9253 next = next_active_insn (insn);
9254
9255 if (next)
9256 {
9257 rtx pat = PATTERN (next);
9258
9259 if (GET_CODE (pat) == SET
9260 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9261 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9262 emit_insn_after (gen_unop (), insn);
9263 }
9264 }
9265 }
9266 \f
9267 /* Machine dependent reorg pass. */
9268
9269 static void
9270 alpha_reorg (void)
9271 {
9272 /* Workaround for a linker error that triggers when an
9273      exception handler immediately follows a noreturn function.
9274
9275 The instruction stream from an object file:
9276
9277 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9278 58: 00 00 ba 27 ldah gp,0(ra)
9279 5c: 00 00 bd 23 lda gp,0(gp)
9280 60: 00 00 7d a7 ldq t12,0(gp)
9281 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9282
9283 was converted in the final link pass to:
9284
9285 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9286 fdb28: 00 00 fe 2f unop
9287 fdb2c: 00 00 fe 2f unop
9288 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9289 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9290
9291 GP load instructions were wrongly cleared by the linker relaxation
9292 pass. This workaround prevents removal of GP loads by inserting
9293 an unop instruction between a noreturn function call and
9294 exception handler prologue. */
9295
9296 if (current_function_has_exception_handlers ())
9297 alpha_pad_noreturn ();
9298
9299 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9300 alpha_handle_trap_shadows ();
9301
9302 /* Due to the number of extra trapb insns, don't bother fixing up
9303 alignment when trap precision is instruction. Moreover, we can
9304 only do our job when sched2 is run. */
9305 if (optimize && !optimize_size
9306 && alpha_tp != ALPHA_TP_INSN
9307 && flag_schedule_insns_after_reload)
9308 {
9309 if (alpha_tune == PROCESSOR_EV4)
9310 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9311 else if (alpha_tune == PROCESSOR_EV5)
9312 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9313 }
9314 }
9315 \f
9316 #ifdef HAVE_STAMP_H
9317 #include <stamp.h>
9318 #endif
9319
9320 static void
9321 alpha_file_start (void)
9322 {
9323 #ifdef OBJECT_FORMAT_ELF
9324 /* If emitting dwarf2 debug information, we cannot generate a .file
9325 directive to start the file, as it will conflict with dwarf2out
9326 file numbers. So it's only useful when emitting mdebug output. */
9327 targetm.asm_file_start_file_directive = (write_symbols == DBX_DEBUG);
9328 #endif
9329
9330 default_file_start ();
9331 #ifdef MS_STAMP
9332 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9333 #endif
9334
9335 fputs ("\t.set noreorder\n", asm_out_file);
9336 fputs ("\t.set volatile\n", asm_out_file);
9337 if (TARGET_ABI_OSF)
9338 fputs ("\t.set noat\n", asm_out_file);
9339 if (TARGET_EXPLICIT_RELOCS)
9340 fputs ("\t.set nomacro\n", asm_out_file);
9341 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9342 {
9343 const char *arch;
9344
9345 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9346 arch = "ev6";
9347 else if (TARGET_MAX)
9348 arch = "pca56";
9349 else if (TARGET_BWX)
9350 arch = "ev56";
9351 else if (alpha_cpu == PROCESSOR_EV5)
9352 arch = "ev5";
9353 else
9354 arch = "ev4";
9355
9356 fprintf (asm_out_file, "\t.arch %s\n", arch);
9357 }
9358 }
9359
9360 #ifdef OBJECT_FORMAT_ELF
9361 /* Since we don't have a .dynbss section, we should not allow global
9362 relocations in the .rodata section. */
9363
9364 static int
9365 alpha_elf_reloc_rw_mask (void)
9366 {
9367 return flag_pic ? 3 : 2;
9368 }
9369
9370 /* Return a section for X. The only special thing we do here is to
9371 honor small data. */
9372
9373 static section *
9374 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9375 unsigned HOST_WIDE_INT align)
9376 {
9377 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9378 /* ??? Consider using mergeable sdata sections. */
9379 return sdata_section;
9380 else
9381 return default_elf_select_rtx_section (mode, x, align);
9382 }
9383
9384 static unsigned int
9385 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9386 {
9387 unsigned int flags = 0;
9388
9389 if (strcmp (name, ".sdata") == 0
9390 || strncmp (name, ".sdata.", 7) == 0
9391 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9392 || strcmp (name, ".sbss") == 0
9393 || strncmp (name, ".sbss.", 6) == 0
9394 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9395 flags = SECTION_SMALL;
9396
9397 flags |= default_section_type_flags (decl, name, reloc);
9398 return flags;
9399 }
9400 #endif /* OBJECT_FORMAT_ELF */
9401 \f
9402 /* Structure to collect function names for final output in link section. */
9403 /* Note that items marked with GTY can't be ifdef'ed out. */
9404
9405 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9406 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9407
9408 struct GTY(()) alpha_links
9409 {
9410 int num;
9411 const char *target;
9412 rtx linkage;
9413 enum links_kind lkind;
9414 enum reloc_kind rkind;
9415 };
9416
9417 struct GTY(()) alpha_funcs
9418 {
9419 int num;
9420 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9421 links;
9422 };
9423
9424 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9425 splay_tree alpha_links_tree;
9426 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9427 splay_tree alpha_funcs_tree;
9428
9429 static GTY(()) int alpha_funcs_num;
9430
9431 #if TARGET_ABI_OPEN_VMS
9432
9433 /* Return the VMS argument type corresponding to MODE. */
9434
9435 enum avms_arg_type
9436 alpha_arg_type (enum machine_mode mode)
9437 {
9438 switch (mode)
9439 {
9440 case SFmode:
9441 return TARGET_FLOAT_VAX ? FF : FS;
9442 case DFmode:
9443 return TARGET_FLOAT_VAX ? FD : FT;
9444 default:
9445 return I64;
9446 }
9447 }
9448
9449 /* Return an rtx for an integer representing the VMS Argument Information
9450 register value. */
9451
9452 rtx
9453 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9454 {
9455 unsigned HOST_WIDE_INT regval = cum.num_args;
9456 int i;
9457
9458 for (i = 0; i < 6; i++)
9459 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9460
9461 return GEN_INT (regval);
9462 }
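
/* For instance (illustrative), a call passing (int, double) with the default
   IEEE float format gives num_args == 2 in bits 0..7, I64 in bits 8..10 for
   the first argument and FT in bits 11..13 for the second; each further
   argument slot occupies the next three bits, per the i * 3 + 8 shift
   above.  */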
9463 \f
9464 /* Register the need for a (fake) .linkage entry for calls to function NAME.
9465 IS_LOCAL is 1 if this is for a definition, 0 if this is for a real call.
9466 Return a SYMBOL_REF suited to the call instruction. */
9467
9468 rtx
9469 alpha_need_linkage (const char *name, int is_local)
9470 {
9471 splay_tree_node node;
9472 struct alpha_links *al;
9473 const char *target;
9474 tree id;
9475
9476 if (name[0] == '*')
9477 name++;
9478
9479 if (is_local)
9480 {
9481 struct alpha_funcs *cfaf;
9482
9483 if (!alpha_funcs_tree)
9484 alpha_funcs_tree = splay_tree_new_ggc
9485 (splay_tree_compare_pointers,
9486 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
9487 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
9488
9489
9490 cfaf = ggc_alloc_alpha_funcs ();
9491
9492 cfaf->links = 0;
9493 cfaf->num = ++alpha_funcs_num;
9494
9495 splay_tree_insert (alpha_funcs_tree,
9496 (splay_tree_key) current_function_decl,
9497 (splay_tree_value) cfaf);
9498 }
9499
9500 if (alpha_links_tree)
9501 {
9502 /* Is this name already defined? */
9503
9504 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9505 if (node)
9506 {
9507 al = (struct alpha_links *) node->value;
9508 if (is_local)
9509 {
9510 /* Defined here but external assumed. */
9511 if (al->lkind == KIND_EXTERN)
9512 al->lkind = KIND_LOCAL;
9513 }
9514 else
9515 {
9516 /* Used here but unused assumed. */
9517 if (al->lkind == KIND_UNUSED)
9518 al->lkind = KIND_LOCAL;
9519 }
9520 return al->linkage;
9521 }
9522 }
9523 else
9524 alpha_links_tree = splay_tree_new_ggc
9525 ((splay_tree_compare_fn) strcmp,
9526 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9527 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9528
9529 al = ggc_alloc_alpha_links ();
9530 name = ggc_strdup (name);
9531
9532 /* Assume external if no definition. */
9533 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9534
9535 /* Ensure we have an IDENTIFIER so assemble_name can mark it used
9536 and find the ultimate alias target like assemble_name. */
9537 id = get_identifier (name);
9538 target = NULL;
9539 while (IDENTIFIER_TRANSPARENT_ALIAS (id))
9540 {
9541 id = TREE_CHAIN (id);
9542 target = IDENTIFIER_POINTER (id);
9543 }
9544
9545 al->target = target ? target : name;
9546 al->linkage = gen_rtx_SYMBOL_REF (Pmode, name);
9547
9548 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9549 (splay_tree_value) al);
9550
9551 return al->linkage;
9552 }
9553
9554 /* Return a SYMBOL_REF representing the reference to the .linkage entry
9555 of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
9556 this is the reference to the linkage pointer value, 0 if this is the
9557    reference to the function entry value.  RFLAG is 1 if this is a reduced
9558 reference (code address only), 0 if this is a full reference. */
9559
9560 rtx
9561 alpha_use_linkage (rtx func, tree cfundecl, int lflag, int rflag)
9562 {
9563 splay_tree_node cfunnode;
9564 struct alpha_funcs *cfaf;
9565 struct alpha_links *al;
9566 const char *name = XSTR (func, 0);
9567
9568 cfaf = (struct alpha_funcs *) 0;
9569 al = (struct alpha_links *) 0;
9570
9571 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9572 cfaf = (struct alpha_funcs *) cfunnode->value;
9573
9574 if (cfaf->links)
9575 {
9576 splay_tree_node lnode;
9577
9578 /* Is this name already defined? */
9579
9580 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9581 if (lnode)
9582 al = (struct alpha_links *) lnode->value;
9583 }
9584 else
9585 cfaf->links = splay_tree_new_ggc
9586 ((splay_tree_compare_fn) strcmp,
9587 ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
9588 ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
9589
9590 if (!al)
9591 {
9592 size_t name_len;
9593 size_t buflen;
9594 char *linksym;
9595 splay_tree_node node = 0;
9596 struct alpha_links *anl;
9597
9598 if (name[0] == '*')
9599 name++;
9600
9601 name_len = strlen (name);
9602 linksym = (char *) alloca (name_len + 50);
9603
9604 al = ggc_alloc_alpha_links ();
9605 al->num = cfaf->num;
9606 al->target = NULL;
9607
9608 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9609 if (node)
9610 {
9611 anl = (struct alpha_links *) node->value;
9612 al->lkind = anl->lkind;
9613 name = anl->target;
9614 }
9615
9616 sprintf (linksym, "$%d..%s..lk", cfaf->num, name);
9617 buflen = strlen (linksym);
9618
9619 al->linkage = gen_rtx_SYMBOL_REF
9620 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9621
9622 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9623 (splay_tree_value) al);
9624 }
9625
9626 if (rflag)
9627 al->rkind = KIND_CODEADDR;
9628 else
9629 al->rkind = KIND_LINKAGE;
9630
9631 if (lflag)
9632 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9633 else
9634 return al->linkage;
9635 }
9636
9637 static int
9638 alpha_write_one_linkage (splay_tree_node node, void *data)
9639 {
9640 const char *const name = (const char *) node->key;
9641 struct alpha_links *link = (struct alpha_links *) node->value;
9642 FILE *stream = (FILE *) data;
9643
9644 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9645 if (link->rkind == KIND_CODEADDR)
9646 {
9647 if (link->lkind == KIND_LOCAL)
9648 {
9649 /* Local and used */
9650 fprintf (stream, "\t.quad %s..en\n", name);
9651 }
9652 else
9653 {
9654 /* External and used, request code address. */
9655 fprintf (stream, "\t.code_address %s\n", name);
9656 }
9657 }
9658 else
9659 {
9660 if (link->lkind == KIND_LOCAL)
9661 {
9662 /* Local and used, build linkage pair. */
9663 fprintf (stream, "\t.quad %s..en\n", name);
9664 fprintf (stream, "\t.quad %s\n", name);
9665 }
9666 else
9667 {
9668 /* External and used, request linkage pair. */
9669 fprintf (stream, "\t.linkage %s\n", name);
9670 }
9671 }
9672
9673 return 0;
9674 }
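
/* As an illustration, for a function "foo" that is both defined and called
   locally (KIND_LOCAL, KIND_LINKAGE), the routine above emits a two-quadword
   linkage pair such as

	$1..foo..lk:
		.quad foo..en
		.quad foo

   whereas an external function gets a single ".linkage foo" request (or
   ".code_address foo" for a code-address-only KIND_CODEADDR reference).  */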
9675
9676 static void
9677 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9678 {
9679 splay_tree_node node;
9680 struct alpha_funcs *func;
9681
9682 fprintf (stream, "\t.link\n");
9683 fprintf (stream, "\t.align 3\n");
9684 in_section = NULL;
9685
9686 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9687 func = (struct alpha_funcs *) node->value;
9688
9689 fputs ("\t.name ", stream);
9690 assemble_name (stream, funname);
9691 fputs ("..na\n", stream);
9692 ASM_OUTPUT_LABEL (stream, funname);
9693 fprintf (stream, "\t.pdesc ");
9694 assemble_name (stream, funname);
9695 fprintf (stream, "..en,%s\n",
9696 alpha_procedure_type == PT_STACK ? "stack"
9697 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9698
9699 if (func->links)
9700 {
9701 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9702 /* splay_tree_delete (func->links); */
9703 }
9704 }
9705
9706 /* Switch to an arbitrary section NAME with attributes as specified
9707 by FLAGS. ALIGN specifies any known alignment requirements for
9708 the section; 0 if the default should be used. */
9709
9710 static void
9711 vms_asm_named_section (const char *name, unsigned int flags,
9712 tree decl ATTRIBUTE_UNUSED)
9713 {
9714 fputc ('\n', asm_out_file);
9715 fprintf (asm_out_file, ".section\t%s", name);
9716
9717 if (flags & SECTION_DEBUG)
9718 fprintf (asm_out_file, ",NOWRT");
9719
9720 fputc ('\n', asm_out_file);
9721 }
9722
9723 /* Record an element in the table of global constructors. SYMBOL is
9724 a SYMBOL_REF of the function to be called; PRIORITY is a number
9725 between 0 and MAX_INIT_PRIORITY.
9726
9727 Differs from default_ctors_section_asm_out_constructor in that the
9728 width of the .ctors entry is always 64 bits, rather than the 32 bits
9729 used by a normal pointer. */
9730
9731 static void
9732 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9733 {
9734 switch_to_section (ctors_section);
9735 assemble_align (BITS_PER_WORD);
9736 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9737 }
9738
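/* Likewise, record an element in the table of global destructors.  */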
9739 static void
9740 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9741 {
9742 switch_to_section (dtors_section);
9743 assemble_align (BITS_PER_WORD);
9744 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9745 }
9746 #else
9747
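/* Stub versions for ABIs other than OpenVMS; there are no linkage
   sections to manage, so callers just get NULL_RTX back.  */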
9748 rtx
9749 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9750 int is_local ATTRIBUTE_UNUSED)
9751 {
9752 return NULL_RTX;
9753 }
9754
9755 rtx
9756 alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
9757 tree cfundecl ATTRIBUTE_UNUSED,
9758 int lflag ATTRIBUTE_UNUSED,
9759 int rflag ATTRIBUTE_UNUSED)
9760 {
9761 return NULL_RTX;
9762 }
9763
9764 #endif /* TARGET_ABI_OPEN_VMS */
9765 \f
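/* Set up target-specific library routine names.  On OpenVMS the OTS$
   run-time routines take over integer division and remainder from the
   default libcalls, and decc$ routines provide abort and memcmp.  */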
9766 static void
9767 alpha_init_libfuncs (void)
9768 {
9769 if (TARGET_ABI_OPEN_VMS)
9770 {
9771 /* Use the VMS runtime library functions for division and
9772 remainder. */
9773 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9774 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9775 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9776 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9777 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9778 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9779 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9780 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9781 abort_libfunc = init_one_libfunc ("decc$abort");
9782 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
9783 #ifdef MEM_LIBFUNCS_INIT
9784 MEM_LIBFUNCS_INIT;
9785 #endif
9786 }
9787 }
9788
9789 /* On the Alpha, we use this to disable the floating-point registers
9790 when they don't exist. */
9791
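/* Hard registers 32 through 62 are the floating-point registers
   $f0-$f30; register 63 ($f31) always reads as zero and is already
   fixed, so the loop below does not touch it.  */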
9792 static void
9793 alpha_conditional_register_usage (void)
9794 {
9795 int i;
9796 if (! TARGET_FPREGS)
9797 for (i = 32; i < 63; i++)
9798 fixed_regs[i] = call_used_regs[i] = 1;
9799 }
9800 \f
9801 /* Initialize the GCC target structure. */
9802 #if TARGET_ABI_OPEN_VMS
9803 # undef TARGET_ATTRIBUTE_TABLE
9804 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
9805 # undef TARGET_CAN_ELIMINATE
9806 # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
9807 #endif
9808
9809 #undef TARGET_IN_SMALL_DATA_P
9810 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
9811
9812 #undef TARGET_ASM_ALIGNED_HI_OP
9813 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
9814 #undef TARGET_ASM_ALIGNED_DI_OP
9815 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
9816
9817 /* Default unaligned ops are provided for ELF systems. To get unaligned
9818 data for non-ELF systems, we have to turn off auto alignment. */
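/* The leading ".align 0" asks for single-byte (i.e. no) alignment, so
   the directive that follows emits its operand at the current offset
   even when that offset is not naturally aligned.  */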
9819 #if !defined (OBJECT_FORMAT_ELF) || TARGET_ABI_OPEN_VMS
9820 #undef TARGET_ASM_UNALIGNED_HI_OP
9821 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
9822 #undef TARGET_ASM_UNALIGNED_SI_OP
9823 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
9824 #undef TARGET_ASM_UNALIGNED_DI_OP
9825 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
9826 #endif
9827
9828 #ifdef OBJECT_FORMAT_ELF
9829 #undef TARGET_ASM_RELOC_RW_MASK
9830 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
9831 #undef TARGET_ASM_SELECT_RTX_SECTION
9832 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
9833 #undef TARGET_SECTION_TYPE_FLAGS
9834 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
9835 #endif
9836
9837 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
9838 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
9839
9840 #undef TARGET_INIT_LIBFUNCS
9841 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
9842
9843 #undef TARGET_LEGITIMIZE_ADDRESS
9844 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
9845
9846 #undef TARGET_ASM_FILE_START
9847 #define TARGET_ASM_FILE_START alpha_file_start
9848 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
9849 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
9850
9851 #undef TARGET_SCHED_ADJUST_COST
9852 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
9853 #undef TARGET_SCHED_ISSUE_RATE
9854 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
9855 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
9856 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
9857 alpha_multipass_dfa_lookahead
9858
9859 #undef TARGET_HAVE_TLS
9860 #define TARGET_HAVE_TLS HAVE_AS_TLS
9861
9862 #undef TARGET_BUILTIN_DECL
9863 #define TARGET_BUILTIN_DECL alpha_builtin_decl
9864 #undef TARGET_INIT_BUILTINS
9865 #define TARGET_INIT_BUILTINS alpha_init_builtins
9866 #undef TARGET_EXPAND_BUILTIN
9867 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
9868 #undef TARGET_FOLD_BUILTIN
9869 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
9870
9871 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
9872 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
9873 #undef TARGET_CANNOT_COPY_INSN_P
9874 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
9875 #undef TARGET_CANNOT_FORCE_CONST_MEM
9876 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
9877
9878 #if TARGET_ABI_OSF
9879 #undef TARGET_ASM_OUTPUT_MI_THUNK
9880 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
9881 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
9882 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
9883 #undef TARGET_STDARG_OPTIMIZE_HOOK
9884 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
9885 #endif
9886
9887 #undef TARGET_RTX_COSTS
9888 #define TARGET_RTX_COSTS alpha_rtx_costs
9889 #undef TARGET_ADDRESS_COST
9890 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
9891
9892 #undef TARGET_MACHINE_DEPENDENT_REORG
9893 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
9894
9895 #undef TARGET_PROMOTE_FUNCTION_MODE
9896 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
9897 #undef TARGET_PROMOTE_PROTOTYPES
9898 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
9899 #undef TARGET_RETURN_IN_MEMORY
9900 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
9901 #undef TARGET_PASS_BY_REFERENCE
9902 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
9903 #undef TARGET_SETUP_INCOMING_VARARGS
9904 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
9905 #undef TARGET_STRICT_ARGUMENT_NAMING
9906 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
9907 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
9908 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
9909 #undef TARGET_SPLIT_COMPLEX_ARG
9910 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
9911 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
9912 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
9913 #undef TARGET_ARG_PARTIAL_BYTES
9914 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
9915 #undef TARGET_FUNCTION_ARG
9916 #define TARGET_FUNCTION_ARG alpha_function_arg
9917 #undef TARGET_FUNCTION_ARG_ADVANCE
9918 #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
9919 #undef TARGET_TRAMPOLINE_INIT
9920 #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
9921
9922 #undef TARGET_SECONDARY_RELOAD
9923 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
9924
9925 #undef TARGET_SCALAR_MODE_SUPPORTED_P
9926 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
9927 #undef TARGET_VECTOR_MODE_SUPPORTED_P
9928 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
9929
9930 #undef TARGET_BUILD_BUILTIN_VA_LIST
9931 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
9932
9933 #undef TARGET_EXPAND_BUILTIN_VA_START
9934 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
9935
9936 /* The Alpha architecture does not require sequential consistency. See
9937 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
9938 for an example of how it can be violated in practice. */
9939 #undef TARGET_RELAXED_ORDERING
9940 #define TARGET_RELAXED_ORDERING true
9941
9942 #undef TARGET_DEFAULT_TARGET_FLAGS
9943 #define TARGET_DEFAULT_TARGET_FLAGS \
9944 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
9945 #undef TARGET_HANDLE_OPTION
9946 #define TARGET_HANDLE_OPTION alpha_handle_option
9947
9948 #undef TARGET_OPTION_OVERRIDE
9949 #define TARGET_OPTION_OVERRIDE alpha_option_override
9950
9951 #undef TARGET_OPTION_OPTIMIZATION_TABLE
9952 #define TARGET_OPTION_OPTIMIZATION_TABLE alpha_option_optimization_table
9953
9954 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9955 #undef TARGET_MANGLE_TYPE
9956 #define TARGET_MANGLE_TYPE alpha_mangle_type
9957 #endif
9958
9959 #undef TARGET_LEGITIMATE_ADDRESS_P
9960 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
9961
9962 #undef TARGET_CONDITIONAL_REGISTER_USAGE
9963 #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
9964
9965 struct gcc_target targetm = TARGET_INITIALIZER;
9966
9967 \f
9968 #include "gt-alpha.h"