[gcc.git] / gcc / config / alpha / alpha.c
1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
12
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "recog.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "reload.h"
41 #include "obstack.h"
42 #include "except.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "ggc.h"
46 #include "integrate.h"
47 #include "tm_p.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "debug.h"
51 #include "langhooks.h"
52 #include <splay-tree.h>
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
55 #include "tree-flow.h"
56 #include "tree-stdarg.h"
57 #include "tm-constrs.h"
58 #include "df.h"
59
60 /* Specify which cpu to schedule for. */
61 enum processor_type alpha_tune;
62
63 /* Which cpu we're generating code for. */
64 enum processor_type alpha_cpu;
65
66 static const char * const alpha_cpu_name[] =
67 {
68 "ev4", "ev5", "ev6"
69 };
70
71 /* Specify how accurate floating-point traps need to be. */
72
73 enum alpha_trap_precision alpha_tp;
74
75 /* Specify the floating-point rounding mode. */
76
77 enum alpha_fp_rounding_mode alpha_fprm;
78
79 /* Specify which things cause traps. */
80
81 enum alpha_fp_trap_mode alpha_fptm;
82
83 /* Save information from a "cmpxx" operation until the branch or scc is
84 emitted. */
85
86 struct alpha_compare alpha_compare;
87
88 /* Nonzero if inside of a function, because the Alpha asm can't
89 handle .files inside of functions. */
90
91 static int inside_function = FALSE;
92
93 /* The number of cycles of latency we should assume on memory reads. */
94
95 int alpha_memory_latency = 3;
96
97 /* Whether the function needs the GP. */
98
99 static int alpha_function_needs_gp;
100
101 /* The alias set for prologue/epilogue register save/restore. */
102
103 static GTY(()) alias_set_type alpha_sr_alias_set;
104
105 /* The assembler name of the current function. */
106
107 static const char *alpha_fnname;
108
109 /* The next explicit relocation sequence number. */
110 extern GTY(()) int alpha_next_sequence_number;
111 int alpha_next_sequence_number = 1;
112
113 /* The literal and gpdisp sequence numbers for this insn, as printed
114 by %# and %* respectively. */
115 extern GTY(()) int alpha_this_literal_sequence_number;
116 extern GTY(()) int alpha_this_gpdisp_sequence_number;
117 int alpha_this_literal_sequence_number;
118 int alpha_this_gpdisp_sequence_number;
119
120 /* Costs of various operations on the different architectures. */
121
122 struct alpha_rtx_cost_data
123 {
124 unsigned char fp_add;
125 unsigned char fp_mult;
126 unsigned char fp_div_sf;
127 unsigned char fp_div_df;
128 unsigned char int_mult_si;
129 unsigned char int_mult_di;
130 unsigned char int_shift;
131 unsigned char int_cmov;
132 unsigned short int_div;
133 };
134
135 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
136 {
137 { /* EV4 */
138 COSTS_N_INSNS (6), /* fp_add */
139 COSTS_N_INSNS (6), /* fp_mult */
140 COSTS_N_INSNS (34), /* fp_div_sf */
141 COSTS_N_INSNS (63), /* fp_div_df */
142 COSTS_N_INSNS (23), /* int_mult_si */
143 COSTS_N_INSNS (23), /* int_mult_di */
144 COSTS_N_INSNS (2), /* int_shift */
145 COSTS_N_INSNS (2), /* int_cmov */
146 COSTS_N_INSNS (97), /* int_div */
147 },
148 { /* EV5 */
149 COSTS_N_INSNS (4), /* fp_add */
150 COSTS_N_INSNS (4), /* fp_mult */
151 COSTS_N_INSNS (15), /* fp_div_sf */
152 COSTS_N_INSNS (22), /* fp_div_df */
153 COSTS_N_INSNS (8), /* int_mult_si */
154 COSTS_N_INSNS (12), /* int_mult_di */
155 COSTS_N_INSNS (1) + 1, /* int_shift */
156 COSTS_N_INSNS (1), /* int_cmov */
157 COSTS_N_INSNS (83), /* int_div */
158 },
159 { /* EV6 */
160 COSTS_N_INSNS (4), /* fp_add */
161 COSTS_N_INSNS (4), /* fp_mult */
162 COSTS_N_INSNS (12), /* fp_div_sf */
163 COSTS_N_INSNS (15), /* fp_div_df */
164 COSTS_N_INSNS (7), /* int_mult_si */
165 COSTS_N_INSNS (7), /* int_mult_di */
166 COSTS_N_INSNS (1), /* int_shift */
167 COSTS_N_INSNS (2), /* int_cmov */
168 COSTS_N_INSNS (86), /* int_div */
169 },
170 };
171
172 /* Similar but tuned for code size instead of execution latency. The
173 extra +N is fractional cost tuning based on latency. It's used to
174 encourage use of cheaper insns like shift, but only if there's just
175 one of them. */
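/* (COSTS_N_INSNS works in units of four per instruction, so the "+ 1"
   and "+ 2" adjustments below are quarter- and half-insn penalties on
   top of the base single-insn cost.)  */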
176
177 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
178 {
179 COSTS_N_INSNS (1), /* fp_add */
180 COSTS_N_INSNS (1), /* fp_mult */
181 COSTS_N_INSNS (1), /* fp_div_sf */
182 COSTS_N_INSNS (1) + 1, /* fp_div_df */
183 COSTS_N_INSNS (1) + 1, /* int_mult_si */
184 COSTS_N_INSNS (1) + 2, /* int_mult_di */
185 COSTS_N_INSNS (1), /* int_shift */
186 COSTS_N_INSNS (1), /* int_cmov */
187 COSTS_N_INSNS (6), /* int_div */
188 };
189
190 /* Get the number of args of a function in one of two ways. */
191 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
192 #define NUM_ARGS current_function_args_info.num_args
193 #else
194 #define NUM_ARGS current_function_args_info
195 #endif
196
197 #define REG_PV 27
198 #define REG_RA 26
199
200 /* Declarations of static functions. */
201 static struct machine_function *alpha_init_machine_status (void);
202 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
203
204 #if TARGET_ABI_OPEN_VMS
205 static void alpha_write_linkage (FILE *, const char *, tree);
206 #endif
207
208 static void unicosmk_output_deferred_case_vectors (FILE *);
209 static void unicosmk_gen_dsib (unsigned long *);
210 static void unicosmk_output_ssib (FILE *, const char *);
211 static int unicosmk_need_dex (rtx);
212 \f
213 /* Implement TARGET_HANDLE_OPTION. */
214
215 static bool
216 alpha_handle_option (size_t code, const char *arg, int value)
217 {
218 switch (code)
219 {
220 case OPT_mfp_regs:
221 if (value == 0)
222 target_flags |= MASK_SOFT_FP;
223 break;
224
225 case OPT_mieee:
226 case OPT_mieee_with_inexact:
227 target_flags |= MASK_IEEE_CONFORMANT;
228 break;
229
230 case OPT_mtls_size_:
231 if (value != 16 && value != 32 && value != 64)
232 error ("bad value %qs for -mtls-size switch", arg);
233 break;
234 }
235
236 return true;
237 }
238
239 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
240 /* Implement TARGET_MANGLE_TYPE. */
241
242 static const char *
243 alpha_mangle_type (const_tree type)
244 {
245 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
246 && TARGET_LONG_DOUBLE_128)
247 return "g";
248
249 /* For all other types, use normal C++ mangling. */
250 return NULL;
251 }
252 #endif
253
254 /* Parse target option strings. */
255
256 void
257 override_options (void)
258 {
259 static const struct cpu_table {
260 const char *const name;
261 const enum processor_type processor;
262 const int flags;
263 } cpu_table[] = {
264 { "ev4", PROCESSOR_EV4, 0 },
265 { "ev45", PROCESSOR_EV4, 0 },
266 { "21064", PROCESSOR_EV4, 0 },
267 { "ev5", PROCESSOR_EV5, 0 },
268 { "21164", PROCESSOR_EV5, 0 },
269 { "ev56", PROCESSOR_EV5, MASK_BWX },
270 { "21164a", PROCESSOR_EV5, MASK_BWX },
271 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
272 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
273 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
274 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
275 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
276 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
277 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
278 { 0, 0, 0 }
279 };
280
281 int i;
282
283 /* Unicos/Mk doesn't have shared libraries. */
284 if (TARGET_ABI_UNICOSMK && flag_pic)
285 {
286 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
287 (flag_pic > 1) ? "PIC" : "pic");
288 flag_pic = 0;
289 }
290
291 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
292 floating-point instructions. Make that the default for this target. */
293 if (TARGET_ABI_UNICOSMK)
294 alpha_fprm = ALPHA_FPRM_DYN;
295 else
296 alpha_fprm = ALPHA_FPRM_NORM;
297
298 alpha_tp = ALPHA_TP_PROG;
299 alpha_fptm = ALPHA_FPTM_N;
300
301 /* We cannot use su and sui qualifiers for conversion instructions on
302 Unicos/Mk. I'm not sure if this is due to assembler or hardware
303 limitations. Right now, we issue a warning if -mieee is specified
304 and then ignore it; eventually, we should either get it right or
305 disable the option altogether. */
306
307 if (TARGET_IEEE)
308 {
309 if (TARGET_ABI_UNICOSMK)
310 warning (0, "-mieee not supported on Unicos/Mk");
311 else
312 {
313 alpha_tp = ALPHA_TP_INSN;
314 alpha_fptm = ALPHA_FPTM_SU;
315 }
316 }
317
318 if (TARGET_IEEE_WITH_INEXACT)
319 {
320 if (TARGET_ABI_UNICOSMK)
321 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
322 else
323 {
324 alpha_tp = ALPHA_TP_INSN;
325 alpha_fptm = ALPHA_FPTM_SUI;
326 }
327 }
328
329 if (alpha_tp_string)
330 {
331 if (! strcmp (alpha_tp_string, "p"))
332 alpha_tp = ALPHA_TP_PROG;
333 else if (! strcmp (alpha_tp_string, "f"))
334 alpha_tp = ALPHA_TP_FUNC;
335 else if (! strcmp (alpha_tp_string, "i"))
336 alpha_tp = ALPHA_TP_INSN;
337 else
338 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
339 }
340
341 if (alpha_fprm_string)
342 {
343 if (! strcmp (alpha_fprm_string, "n"))
344 alpha_fprm = ALPHA_FPRM_NORM;
345 else if (! strcmp (alpha_fprm_string, "m"))
346 alpha_fprm = ALPHA_FPRM_MINF;
347 else if (! strcmp (alpha_fprm_string, "c"))
348 alpha_fprm = ALPHA_FPRM_CHOP;
349 else if (! strcmp (alpha_fprm_string,"d"))
350 alpha_fprm = ALPHA_FPRM_DYN;
351 else
352 error ("bad value %qs for -mfp-rounding-mode switch",
353 alpha_fprm_string);
354 }
355
356 if (alpha_fptm_string)
357 {
358 if (strcmp (alpha_fptm_string, "n") == 0)
359 alpha_fptm = ALPHA_FPTM_N;
360 else if (strcmp (alpha_fptm_string, "u") == 0)
361 alpha_fptm = ALPHA_FPTM_U;
362 else if (strcmp (alpha_fptm_string, "su") == 0)
363 alpha_fptm = ALPHA_FPTM_SU;
364 else if (strcmp (alpha_fptm_string, "sui") == 0)
365 alpha_fptm = ALPHA_FPTM_SUI;
366 else
367 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
368 }
369
370 if (alpha_cpu_string)
371 {
372 for (i = 0; cpu_table [i].name; i++)
373 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
374 {
375 alpha_tune = alpha_cpu = cpu_table [i].processor;
376 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
377 target_flags |= cpu_table [i].flags;
378 break;
379 }
380 if (! cpu_table [i].name)
381 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
382 }
383
384 if (alpha_tune_string)
385 {
386 for (i = 0; cpu_table [i].name; i++)
387 if (! strcmp (alpha_tune_string, cpu_table [i].name))
388 {
389 alpha_tune = cpu_table [i].processor;
390 break;
391 }
392 if (! cpu_table [i].name)
393 error ("bad value %qs for -mcpu switch", alpha_tune_string);
394 }
395
396 /* Do some sanity checks on the above options. */
397
398 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
399 {
400 warning (0, "trap mode not supported on Unicos/Mk");
401 alpha_fptm = ALPHA_FPTM_N;
402 }
403
404 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
405 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
406 {
407 warning (0, "fp software completion requires -mtrap-precision=i");
408 alpha_tp = ALPHA_TP_INSN;
409 }
410
411 if (alpha_cpu == PROCESSOR_EV6)
412 {
413 /* Except for EV6 pass 1 (not released), we always have precise
414 arithmetic traps. Which means we can do software completion
415 without minding trap shadows. */
416 alpha_tp = ALPHA_TP_PROG;
417 }
418
419 if (TARGET_FLOAT_VAX)
420 {
421 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
422 {
423 warning (0, "rounding mode not supported for VAX floats");
424 alpha_fprm = ALPHA_FPRM_NORM;
425 }
426 if (alpha_fptm == ALPHA_FPTM_SUI)
427 {
428 warning (0, "trap mode not supported for VAX floats");
429 alpha_fptm = ALPHA_FPTM_SU;
430 }
431 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
432 warning (0, "128-bit long double not supported for VAX floats");
433 target_flags &= ~MASK_LONG_DOUBLE_128;
434 }
435
436 {
437 char *end;
438 int lat;
439
440 if (!alpha_mlat_string)
441 alpha_mlat_string = "L1";
442
443 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
444 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
445 ;
446 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
447 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
448 && alpha_mlat_string[2] == '\0')
449 {
450 static int const cache_latency[][4] =
451 {
452 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
453 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
454 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
455 };
456
457 lat = alpha_mlat_string[1] - '0';
458 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
459 {
460 warning (0, "L%d cache latency unknown for %s",
461 lat, alpha_cpu_name[alpha_tune]);
462 lat = 3;
463 }
464 else
465 lat = cache_latency[alpha_tune][lat-1];
466 }
467 else if (! strcmp (alpha_mlat_string, "main"))
468 {
469 /* Most current memories have about 370ns latency. This is
470 a reasonable guess for a fast cpu. */
471 lat = 150;
472 }
473 else
474 {
475 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
476 lat = 3;
477 }
478
479 alpha_memory_latency = lat;
480 }
481
482 /* Default the definition of "small data" to 8 bytes. */
483 if (!g_switch_set)
484 g_switch_value = 8;
485
486 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
487 if (flag_pic == 1)
488 target_flags |= MASK_SMALL_DATA;
489 else if (flag_pic == 2)
490 target_flags &= ~MASK_SMALL_DATA;
491
492 /* Align labels and loops for optimal branching. */
493 /* ??? Kludge these by not doing anything if we don't optimize and also if
494 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
495 if (optimize > 0 && write_symbols != SDB_DEBUG)
496 {
497 if (align_loops <= 0)
498 align_loops = 16;
499 if (align_jumps <= 0)
500 align_jumps = 16;
501 }
502 if (align_functions <= 0)
503 align_functions = 16;
504
505 /* Acquire a unique set number for our register saves and restores. */
506 alpha_sr_alias_set = new_alias_set ();
507
508 /* Register variables and functions with the garbage collector. */
509
510 /* Set up function hooks. */
511 init_machine_status = alpha_init_machine_status;
512
513 /* Tell the compiler when we're using VAX floating point. */
514 if (TARGET_FLOAT_VAX)
515 {
516 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
517 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
518 REAL_MODE_FORMAT (TFmode) = NULL;
519 }
520
521 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
522 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
523 target_flags |= MASK_LONG_DOUBLE_128;
524 #endif
525 }
526 \f
527 /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
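/* For example, 0xffff00000000ff00 qualifies (every byte is 0x00 or 0xff),
   while 0x00000000ffff0001 does not (its low byte is 0x01).  */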
528
529 int
530 zap_mask (HOST_WIDE_INT value)
531 {
532 int i;
533
534 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
535 i++, value >>= 8)
536 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
537 return 0;
538
539 return 1;
540 }
541
542 /* Return true if OP is valid for a particular TLS relocation.
543 We are already guaranteed that OP is a CONST. */
544
545 int
546 tls_symbolic_operand_1 (rtx op, int size, int unspec)
547 {
548 op = XEXP (op, 0);
549
550 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
551 return 0;
552 op = XVECEXP (op, 0, 0);
553
554 if (GET_CODE (op) != SYMBOL_REF)
555 return 0;
556
557 switch (SYMBOL_REF_TLS_MODEL (op))
558 {
559 case TLS_MODEL_LOCAL_DYNAMIC:
560 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
561 case TLS_MODEL_INITIAL_EXEC:
562 return unspec == UNSPEC_TPREL && size == 64;
563 case TLS_MODEL_LOCAL_EXEC:
564 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
565 default:
566 gcc_unreachable ();
567 }
568 }
569
570 /* Used by aligned_memory_operand and unaligned_memory_operand to
571 resolve what reload is going to do with OP if it's a register. */
572
573 rtx
574 resolve_reload_operand (rtx op)
575 {
576 if (reload_in_progress)
577 {
578 rtx tmp = op;
579 if (GET_CODE (tmp) == SUBREG)
580 tmp = SUBREG_REG (tmp);
581 if (GET_CODE (tmp) == REG
582 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
583 {
584 op = reg_equiv_memory_loc[REGNO (tmp)];
585 if (op == 0)
586 return 0;
587 }
588 }
589 return op;
590 }
591
592 /* The set of scalar modes supported differs from the default check-what-c-supports
593 version in that sometimes TFmode is available even when long double
594 indicates only DFmode. On unicosmk, we have the situation that HImode
595 doesn't map to any C type, but of course we still support that. */
596
597 static bool
598 alpha_scalar_mode_supported_p (enum machine_mode mode)
599 {
600 switch (mode)
601 {
602 case QImode:
603 case HImode:
604 case SImode:
605 case DImode:
606 case TImode: /* via optabs.c */
607 return true;
608
609 case SFmode:
610 case DFmode:
611 return true;
612
613 case TFmode:
614 return TARGET_HAS_XFLOATING_LIBS;
615
616 default:
617 return false;
618 }
619 }
620
621 /* Alpha implements a couple of integer vector mode operations when
622 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
623 which allows the vectorizer to operate on e.g. move instructions,
624 or when expand_vector_operations can do something useful. */
625
626 static bool
627 alpha_vector_mode_supported_p (enum machine_mode mode)
628 {
629 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
630 }
631
632 /* Return 1 if this function can directly return via $26. */
633
634 int
635 direct_return (void)
636 {
637 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
638 && reload_completed
639 && alpha_sa_size () == 0
640 && get_frame_size () == 0
641 && current_function_outgoing_args_size == 0
642 && current_function_pretend_args_size == 0);
643 }
644
645 /* Return the ADDR_VEC associated with a tablejump insn. */
646
647 rtx
648 alpha_tablejump_addr_vec (rtx insn)
649 {
650 rtx tmp;
651
652 tmp = JUMP_LABEL (insn);
653 if (!tmp)
654 return NULL_RTX;
655 tmp = NEXT_INSN (tmp);
656 if (!tmp)
657 return NULL_RTX;
658 if (GET_CODE (tmp) == JUMP_INSN
659 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
660 return PATTERN (tmp);
661 return NULL_RTX;
662 }
663
664 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
665
666 rtx
667 alpha_tablejump_best_label (rtx insn)
668 {
669 rtx jump_table = alpha_tablejump_addr_vec (insn);
670 rtx best_label = NULL_RTX;
671
672 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
673 there for edge frequency counts from profile data. */
674
675 if (jump_table)
676 {
677 int n_labels = XVECLEN (jump_table, 1);
678 int best_count = -1;
679 int i, j;
680
681 for (i = 0; i < n_labels; i++)
682 {
683 int count = 1;
684
685 for (j = i + 1; j < n_labels; j++)
686 if (XEXP (XVECEXP (jump_table, 1, i), 0)
687 == XEXP (XVECEXP (jump_table, 1, j), 0))
688 count++;
689
690 if (count > best_count)
691 best_count = count, best_label = XVECEXP (jump_table, 1, i);
692 }
693 }
694
695 return best_label ? best_label : const0_rtx;
696 }
697
698 /* Return the TLS model to use for SYMBOL. */
699
700 static enum tls_model
701 tls_symbolic_operand_type (rtx symbol)
702 {
703 enum tls_model model;
704
705 if (GET_CODE (symbol) != SYMBOL_REF)
706 return 0;
707 model = SYMBOL_REF_TLS_MODEL (symbol);
708
709 /* Local-exec with a 64-bit size is the same code as initial-exec. */
710 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
711 model = TLS_MODEL_INITIAL_EXEC;
712
713 return model;
714 }
715 \f
716 /* Return true if the function DECL will share the same GP as any
717 function in the current unit of translation. */
718
719 static bool
720 decl_has_samegp (const_tree decl)
721 {
722 /* Functions that are not local can be overridden, and thus may
723 not share the same gp. */
724 if (!(*targetm.binds_local_p) (decl))
725 return false;
726
727 /* If -msmall-data is in effect, assume that there is only one GP
728 for the module, and so any local symbol has this property. We
729 need explicit relocations to be able to enforce this for symbols
730 not defined in this unit of translation, however. */
731 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
732 return true;
733
734 /* Functions that are not external are defined in this UoT. */
735 /* ??? Irritatingly, static functions not yet emitted are still
736 marked "external". Apply this to non-static functions only. */
737 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
738 }
739
740 /* Return true if EXP should be placed in the small data section. */
741
742 static bool
743 alpha_in_small_data_p (const_tree exp)
744 {
745 /* We want to merge strings, so we never consider them small data. */
746 if (TREE_CODE (exp) == STRING_CST)
747 return false;
748
749 /* Functions are never in the small data area. Duh. */
750 if (TREE_CODE (exp) == FUNCTION_DECL)
751 return false;
752
753 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
754 {
755 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
756 if (strcmp (section, ".sdata") == 0
757 || strcmp (section, ".sbss") == 0)
758 return true;
759 }
760 else
761 {
762 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
763
764 /* If this is an incomplete type with size 0, then we can't put it
765 in sdata because it might be too big when completed. */
766 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
767 return true;
768 }
769
770 return false;
771 }
772
773 #if TARGET_ABI_OPEN_VMS
774 static bool
775 alpha_linkage_symbol_p (const char *symname)
776 {
777 int symlen = strlen (symname);
778
779 if (symlen > 4)
780 return strcmp (&symname [symlen - 4], "..lk") == 0;
781
782 return false;
783 }
784
785 #define LINKAGE_SYMBOL_REF_P(X) \
786 ((GET_CODE (X) == SYMBOL_REF \
787 && alpha_linkage_symbol_p (XSTR (X, 0))) \
788 || (GET_CODE (X) == CONST \
789 && GET_CODE (XEXP (X, 0)) == PLUS \
790 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
791 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
792 #endif
793
794 /* legitimate_address_p recognizes an RTL expression that is a valid
795 memory address for an instruction. The MODE argument is the
796 machine mode for the MEM expression that wants to use this address.
797
798 For Alpha, we have either a constant address or the sum of a
799 register and a constant address, or just a register. For DImode,
800 any of those forms can be surrounded with an AND that clears the
801 low-order three bits; this is an "unaligned" access. */
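/* For example, (and (plus (reg) (const_int 5)) (const_int -8)) is a valid
   DImode address: the AND yields the aligned base that ldq_u accesses.  */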
802
803 bool
804 alpha_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
805 {
806 /* If this is an ldq_u type address, discard the outer AND. */
807 if (mode == DImode
808 && GET_CODE (x) == AND
809 && GET_CODE (XEXP (x, 1)) == CONST_INT
810 && INTVAL (XEXP (x, 1)) == -8)
811 x = XEXP (x, 0);
812
813 /* Discard non-paradoxical subregs. */
814 if (GET_CODE (x) == SUBREG
815 && (GET_MODE_SIZE (GET_MODE (x))
816 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
817 x = SUBREG_REG (x);
818
819 /* Unadorned general registers are valid. */
820 if (REG_P (x)
821 && (strict
822 ? STRICT_REG_OK_FOR_BASE_P (x)
823 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
824 return true;
825
826 /* Constant addresses (i.e. +/- 32k) are valid. */
827 if (CONSTANT_ADDRESS_P (x))
828 return true;
829
830 #if TARGET_ABI_OPEN_VMS
831 if (LINKAGE_SYMBOL_REF_P (x))
832 return true;
833 #endif
834
835 /* Register plus a small constant offset is valid. */
836 if (GET_CODE (x) == PLUS)
837 {
838 rtx ofs = XEXP (x, 1);
839 x = XEXP (x, 0);
840
841 /* Discard non-paradoxical subregs. */
842 if (GET_CODE (x) == SUBREG
843 && (GET_MODE_SIZE (GET_MODE (x))
844 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
845 x = SUBREG_REG (x);
846
847 if (REG_P (x))
848 {
849 if (! strict
850 && NONSTRICT_REG_OK_FP_BASE_P (x)
851 && GET_CODE (ofs) == CONST_INT)
852 return true;
853 if ((strict
854 ? STRICT_REG_OK_FOR_BASE_P (x)
855 : NONSTRICT_REG_OK_FOR_BASE_P (x))
856 && CONSTANT_ADDRESS_P (ofs))
857 return true;
858 }
859 }
860
861 /* If we're managing explicit relocations, LO_SUM is valid, as
862 are small data symbols. */
863 else if (TARGET_EXPLICIT_RELOCS)
864 {
865 if (small_symbolic_operand (x, Pmode))
866 return true;
867
868 if (GET_CODE (x) == LO_SUM)
869 {
870 rtx ofs = XEXP (x, 1);
871 x = XEXP (x, 0);
872
873 /* Discard non-paradoxical subregs. */
874 if (GET_CODE (x) == SUBREG
875 && (GET_MODE_SIZE (GET_MODE (x))
876 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
877 x = SUBREG_REG (x);
878
879 /* Must have a valid base register. */
880 if (! (REG_P (x)
881 && (strict
882 ? STRICT_REG_OK_FOR_BASE_P (x)
883 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
884 return false;
885
886 /* The symbol must be local. */
887 if (local_symbolic_operand (ofs, Pmode)
888 || dtp32_symbolic_operand (ofs, Pmode)
889 || tp32_symbolic_operand (ofs, Pmode))
890 return true;
891 }
892 }
893
894 return false;
895 }
896
897 /* Build the SYMBOL_REF for __tls_get_addr. */
898
899 static GTY(()) rtx tls_get_addr_libfunc;
900
901 static rtx
902 get_tls_get_addr (void)
903 {
904 if (!tls_get_addr_libfunc)
905 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
906 return tls_get_addr_libfunc;
907 }
908
909 /* Try machine-dependent ways of modifying an illegitimate address
910 to be legitimate. If we find one, return the new, valid address. */
911
912 rtx
913 alpha_legitimize_address (rtx x, rtx scratch,
914 enum machine_mode mode ATTRIBUTE_UNUSED)
915 {
916 HOST_WIDE_INT addend;
917
918 /* If the address is (plus reg const_int) and the CONST_INT is not a
919 valid offset, compute the high part of the constant and add it to
920 the register. Then our address is (plus temp low-part-const). */
921 if (GET_CODE (x) == PLUS
922 && GET_CODE (XEXP (x, 0)) == REG
923 && GET_CODE (XEXP (x, 1)) == CONST_INT
924 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
925 {
926 addend = INTVAL (XEXP (x, 1));
927 x = XEXP (x, 0);
928 goto split_addend;
929 }
930
931 /* If the address is (const (plus FOO const_int)), find the low-order
932 part of the CONST_INT. Then load FOO plus any high-order part of the
933 CONST_INT into a register. Our address is (plus reg low-part-const).
934 This is done to reduce the number of GOT entries. */
935 if (can_create_pseudo_p ()
936 && GET_CODE (x) == CONST
937 && GET_CODE (XEXP (x, 0)) == PLUS
938 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
939 {
940 addend = INTVAL (XEXP (XEXP (x, 0), 1));
941 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
942 goto split_addend;
943 }
944
945 /* If we have a (plus reg const), emit the load as in (2), then add
946 the two registers, and finally generate (plus reg low-part-const) as
947 our address. */
948 if (can_create_pseudo_p ()
949 && GET_CODE (x) == PLUS
950 && GET_CODE (XEXP (x, 0)) == REG
951 && GET_CODE (XEXP (x, 1)) == CONST
952 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
953 && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == CONST_INT)
954 {
955 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
956 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
957 XEXP (XEXP (XEXP (x, 1), 0), 0),
958 NULL_RTX, 1, OPTAB_LIB_WIDEN);
959 goto split_addend;
960 }
961
962 /* If this is a local symbol, split the address into HIGH/LO_SUM parts. */
963 if (TARGET_EXPLICIT_RELOCS && symbolic_operand (x, Pmode))
964 {
965 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
966
967 switch (tls_symbolic_operand_type (x))
968 {
969 case TLS_MODEL_NONE:
970 break;
971
972 case TLS_MODEL_GLOBAL_DYNAMIC:
973 start_sequence ();
974
975 r0 = gen_rtx_REG (Pmode, 0);
976 r16 = gen_rtx_REG (Pmode, 16);
977 tga = get_tls_get_addr ();
978 dest = gen_reg_rtx (Pmode);
979 seq = GEN_INT (alpha_next_sequence_number++);
980
981 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
982 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
983 insn = emit_call_insn (insn);
984 CONST_OR_PURE_CALL_P (insn) = 1;
985 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
986
987 insn = get_insns ();
988 end_sequence ();
989
990 emit_libcall_block (insn, dest, r0, x);
991 return dest;
992
993 case TLS_MODEL_LOCAL_DYNAMIC:
994 start_sequence ();
995
996 r0 = gen_rtx_REG (Pmode, 0);
997 r16 = gen_rtx_REG (Pmode, 16);
998 tga = get_tls_get_addr ();
999 scratch = gen_reg_rtx (Pmode);
1000 seq = GEN_INT (alpha_next_sequence_number++);
1001
1002 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1003 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1004 insn = emit_call_insn (insn);
1005 CONST_OR_PURE_CALL_P (insn) = 1;
1006 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1007
1008 insn = get_insns ();
1009 end_sequence ();
1010
1011 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1012 UNSPEC_TLSLDM_CALL);
1013 emit_libcall_block (insn, scratch, r0, eqv);
1014
1015 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1016 eqv = gen_rtx_CONST (Pmode, eqv);
1017
1018 if (alpha_tls_size == 64)
1019 {
1020 dest = gen_reg_rtx (Pmode);
1021 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1022 emit_insn (gen_adddi3 (dest, dest, scratch));
1023 return dest;
1024 }
1025 if (alpha_tls_size == 32)
1026 {
1027 insn = gen_rtx_HIGH (Pmode, eqv);
1028 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1029 scratch = gen_reg_rtx (Pmode);
1030 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1031 }
1032 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1033
1034 case TLS_MODEL_INITIAL_EXEC:
1035 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1036 eqv = gen_rtx_CONST (Pmode, eqv);
1037 tp = gen_reg_rtx (Pmode);
1038 scratch = gen_reg_rtx (Pmode);
1039 dest = gen_reg_rtx (Pmode);
1040
1041 emit_insn (gen_load_tp (tp));
1042 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1043 emit_insn (gen_adddi3 (dest, tp, scratch));
1044 return dest;
1045
1046 case TLS_MODEL_LOCAL_EXEC:
1047 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1048 eqv = gen_rtx_CONST (Pmode, eqv);
1049 tp = gen_reg_rtx (Pmode);
1050
1051 emit_insn (gen_load_tp (tp));
1052 if (alpha_tls_size == 32)
1053 {
1054 insn = gen_rtx_HIGH (Pmode, eqv);
1055 insn = gen_rtx_PLUS (Pmode, tp, insn);
1056 tp = gen_reg_rtx (Pmode);
1057 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1058 }
1059 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1060
1061 default:
1062 gcc_unreachable ();
1063 }
1064
1065 if (local_symbolic_operand (x, Pmode))
1066 {
1067 if (small_symbolic_operand (x, Pmode))
1068 return x;
1069 else
1070 {
1071 if (can_create_pseudo_p ())
1072 scratch = gen_reg_rtx (Pmode);
1073 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1074 gen_rtx_HIGH (Pmode, x)));
1075 return gen_rtx_LO_SUM (Pmode, scratch, x);
1076 }
1077 }
1078 }
1079
1080 return NULL;
1081
1082 split_addend:
1083 {
1084 HOST_WIDE_INT low, high;
1085
1086 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1087 addend -= low;
1088 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1089 addend -= high;
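    /* For example, ADDEND = 0x12348765 splits into LOW = -0x789b and
       HIGH = 0x12350000, so the address becomes (x + 0x12350000) + -0x789b,
       each part fitting a sign-extended 16- or 32-bit immediate.  */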
1090
1091 if (addend)
1092 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1093 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1094 1, OPTAB_LIB_WIDEN);
1095 if (high)
1096 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1097 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1098 1, OPTAB_LIB_WIDEN);
1099
1100 return plus_constant (x, low);
1101 }
1102 }
1103
1104 /* Primarily this is required for TLS symbols, but given that our move
1105 patterns *ought* to be able to handle any symbol at any time, we
1106 should never be spilling symbolic operands to the constant pool, ever. */
1107
1108 static bool
1109 alpha_cannot_force_const_mem (rtx x)
1110 {
1111 enum rtx_code code = GET_CODE (x);
1112 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1113 }
1114
1115 /* We do not allow indirect calls to be optimized into sibling calls, nor
1116 can we allow a call to a function with a different GP to be optimized
1117 into a sibcall. */
1118
1119 static bool
1120 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1121 {
1122 /* Can't do indirect tail calls, since we don't know if the target
1123 uses the same GP. */
1124 if (!decl)
1125 return false;
1126
1127 /* Otherwise, we can make a tail call if the target function shares
1128 the same GP. */
1129 return decl_has_samegp (decl);
1130 }
1131
1132 int
1133 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1134 {
1135 rtx x = *px;
1136
1137 /* Don't re-split. */
1138 if (GET_CODE (x) == LO_SUM)
1139 return -1;
1140
1141 return small_symbolic_operand (x, Pmode) != 0;
1142 }
1143
1144 static int
1145 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1146 {
1147 rtx x = *px;
1148
1149 /* Don't re-split. */
1150 if (GET_CODE (x) == LO_SUM)
1151 return -1;
1152
1153 if (small_symbolic_operand (x, Pmode))
1154 {
1155 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1156 *px = x;
1157 return -1;
1158 }
1159
1160 return 0;
1161 }
1162
1163 rtx
1164 split_small_symbolic_operand (rtx x)
1165 {
1166 x = copy_insn (x);
1167 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1168 return x;
1169 }
1170
1171 /* Indicate that INSN cannot be duplicated. This is true for any insn
1172 that we've marked with gpdisp relocs, since those have to stay in
1173 1-1 correspondence with one another.
1174
1175 Technically we could copy them if we could set up a mapping from one
1176 sequence number to another, across the set of insns to be duplicated.
1177 This seems overly complicated and error-prone since interblock motion
1178 from sched-ebb could move one of the pair of insns to a different block.
1179
1180 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1181 then they'll be in a different block from their ldgp. Which could lead
1182 the bb reorder code to think that it would be ok to copy just the block
1183 containing the call and branch to the block containing the ldgp. */
1184
1185 static bool
1186 alpha_cannot_copy_insn_p (rtx insn)
1187 {
1188 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1189 return false;
1190 if (recog_memoized (insn) >= 0)
1191 return get_attr_cannot_copy (insn);
1192 else
1193 return false;
1194 }
1195
1196
1197 /* Try a machine-dependent way of reloading an illegitimate address
1198 operand. If we find one, push the reload and return the new rtx. */
1199
1200 rtx
1201 alpha_legitimize_reload_address (rtx x,
1202 enum machine_mode mode ATTRIBUTE_UNUSED,
1203 int opnum, int type,
1204 int ind_levels ATTRIBUTE_UNUSED)
1205 {
1206 /* We must recognize output that we have already generated ourselves. */
1207 if (GET_CODE (x) == PLUS
1208 && GET_CODE (XEXP (x, 0)) == PLUS
1209 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1210 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1211 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1212 {
1213 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1214 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1215 opnum, type);
1216 return x;
1217 }
1218
1219 /* We wish to handle large displacements off a base register by
1220 splitting the addend across an ldah and the mem insn. This
1221 cuts the number of extra insns needed from 3 to 1. */
1222 if (GET_CODE (x) == PLUS
1223 && GET_CODE (XEXP (x, 0)) == REG
1224 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1225 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1226 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1227 {
1228 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1229 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1230 HOST_WIDE_INT high
1231 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1232
1233 /* Check for 32-bit overflow. */
1234 if (high + low != val)
1235 return NULL_RTX;
1236
1237 /* Reload the high part into a base reg; leave the low part
1238 in the mem directly. */
1239 x = gen_rtx_PLUS (GET_MODE (x),
1240 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1241 GEN_INT (high)),
1242 GEN_INT (low));
1243
1244 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1245 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1246 opnum, type);
1247 return x;
1248 }
1249
1250 return NULL_RTX;
1251 }
1252 \f
1253 /* Compute a (partial) cost for rtx X. Return true if the complete
1254 cost has been computed, and false if subexpressions should be
1255 scanned. In either case, *TOTAL contains the cost result. */
1256
1257 static bool
1258 alpha_rtx_costs (rtx x, int code, int outer_code, int *total)
1259 {
1260 enum machine_mode mode = GET_MODE (x);
1261 bool float_mode_p = FLOAT_MODE_P (mode);
1262 const struct alpha_rtx_cost_data *cost_data;
1263
1264 if (optimize_size)
1265 cost_data = &alpha_rtx_cost_size;
1266 else
1267 cost_data = &alpha_rtx_cost_data[alpha_tune];
1268
1269 switch (code)
1270 {
1271 case CONST_INT:
1272 /* If this is an 8-bit constant, return zero since it can be used
1273 nearly anywhere with no cost. If it is a valid operand for an
1274 ADD or AND, likewise return 0 if we know it will be used in that
1275 context. Otherwise, return 2 since it might be used there later.
1276 All other constants take at least two insns. */
1277 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1278 {
1279 *total = 0;
1280 return true;
1281 }
1282 /* FALLTHRU */
1283
1284 case CONST_DOUBLE:
1285 if (x == CONST0_RTX (mode))
1286 *total = 0;
1287 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1288 || (outer_code == AND && and_operand (x, VOIDmode)))
1289 *total = 0;
1290 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1291 *total = 2;
1292 else
1293 *total = COSTS_N_INSNS (2);
1294 return true;
1295
1296 case CONST:
1297 case SYMBOL_REF:
1298 case LABEL_REF:
1299 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1300 *total = COSTS_N_INSNS (outer_code != MEM);
1301 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1302 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1303 else if (tls_symbolic_operand_type (x))
1304 /* Estimate of cost for call_pal rduniq. */
1305 /* ??? How many insns do we emit here? More than one... */
1306 *total = COSTS_N_INSNS (15);
1307 else
1308 /* Otherwise we do a load from the GOT. */
1309 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1310 return true;
1311
1312 case HIGH:
1313 /* This is effectively an add_operand. */
1314 *total = 2;
1315 return true;
1316
1317 case PLUS:
1318 case MINUS:
1319 if (float_mode_p)
1320 *total = cost_data->fp_add;
1321 else if (GET_CODE (XEXP (x, 0)) == MULT
1322 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1323 {
1324 *total = (rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
1325 + rtx_cost (XEXP (x, 1), outer_code) + COSTS_N_INSNS (1));
1326 return true;
1327 }
1328 return false;
1329
1330 case MULT:
1331 if (float_mode_p)
1332 *total = cost_data->fp_mult;
1333 else if (mode == DImode)
1334 *total = cost_data->int_mult_di;
1335 else
1336 *total = cost_data->int_mult_si;
1337 return false;
1338
1339 case ASHIFT:
1340 if (GET_CODE (XEXP (x, 1)) == CONST_INT
1341 && INTVAL (XEXP (x, 1)) <= 3)
1342 {
1343 *total = COSTS_N_INSNS (1);
1344 return false;
1345 }
1346 /* FALLTHRU */
1347
1348 case ASHIFTRT:
1349 case LSHIFTRT:
1350 *total = cost_data->int_shift;
1351 return false;
1352
1353 case IF_THEN_ELSE:
1354 if (float_mode_p)
1355 *total = cost_data->fp_add;
1356 else
1357 *total = cost_data->int_cmov;
1358 return false;
1359
1360 case DIV:
1361 case UDIV:
1362 case MOD:
1363 case UMOD:
1364 if (!float_mode_p)
1365 *total = cost_data->int_div;
1366 else if (mode == SFmode)
1367 *total = cost_data->fp_div_sf;
1368 else
1369 *total = cost_data->fp_div_df;
1370 return false;
1371
1372 case MEM:
1373 *total = COSTS_N_INSNS (optimize_size ? 1 : alpha_memory_latency);
1374 return true;
1375
1376 case NEG:
1377 if (! float_mode_p)
1378 {
1379 *total = COSTS_N_INSNS (1);
1380 return false;
1381 }
1382 /* FALLTHRU */
1383
1384 case ABS:
1385 if (! float_mode_p)
1386 {
1387 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1388 return false;
1389 }
1390 /* FALLTHRU */
1391
1392 case FLOAT:
1393 case UNSIGNED_FLOAT:
1394 case FIX:
1395 case UNSIGNED_FIX:
1396 case FLOAT_TRUNCATE:
1397 *total = cost_data->fp_add;
1398 return false;
1399
1400 case FLOAT_EXTEND:
1401 if (GET_CODE (XEXP (x, 0)) == MEM)
1402 *total = 0;
1403 else
1404 *total = cost_data->fp_add;
1405 return false;
1406
1407 default:
1408 return false;
1409 }
1410 }
1411 \f
1412 /* REF is an alignable memory location. Place an aligned SImode
1413 reference into *PALIGNED_MEM and the number of bits to shift into
1414 *PBITNUM. SCRATCH is a free register for use in reloading out
1415 of range stack slots. */
1416
1417 void
1418 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1419 {
1420 rtx base;
1421 HOST_WIDE_INT disp, offset;
1422
1423 gcc_assert (GET_CODE (ref) == MEM);
1424
1425 if (reload_in_progress
1426 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1427 {
1428 base = find_replacement (&XEXP (ref, 0));
1429 gcc_assert (memory_address_p (GET_MODE (ref), base));
1430 }
1431 else
1432 base = XEXP (ref, 0);
1433
1434 if (GET_CODE (base) == PLUS)
1435 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1436 else
1437 disp = 0;
1438
1439 /* Find the byte offset within an aligned word. If the memory itself is
1440 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1441 will have examined the base register and determined it is aligned, and
1442 thus displacements from it are naturally alignable. */
1443 if (MEM_ALIGN (ref) >= 32)
1444 offset = 0;
1445 else
1446 offset = disp & 3;
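  /* For example, a QImode reference at 5(base) with an aligned base register
     gives OFFSET 1: the SImode word is accessed at 4(base) and the wanted
     byte sits 8 bits into it (little-endian).  */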
1447
1448 /* Access the entire aligned word. */
1449 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1450
1451 /* Convert the byte offset within the word to a bit offset. */
1452 if (WORDS_BIG_ENDIAN)
1453 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1454 else
1455 offset *= 8;
1456 *pbitnum = GEN_INT (offset);
1457 }
1458
1459 /* Similar to get_aligned_mem, but just return the address.  Handle
1460 the two reload cases. */
1461
1462 rtx
1463 get_unaligned_address (rtx ref)
1464 {
1465 rtx base;
1466 HOST_WIDE_INT offset = 0;
1467
1468 gcc_assert (GET_CODE (ref) == MEM);
1469
1470 if (reload_in_progress
1471 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1472 {
1473 base = find_replacement (&XEXP (ref, 0));
1474
1475 gcc_assert (memory_address_p (GET_MODE (ref), base));
1476 }
1477 else
1478 base = XEXP (ref, 0);
1479
1480 if (GET_CODE (base) == PLUS)
1481 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1482
1483 return plus_constant (base, offset);
1484 }
1485
1486 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1487 X is always returned in a register. */
1488
1489 rtx
1490 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1491 {
1492 if (GET_CODE (addr) == PLUS)
1493 {
1494 ofs += INTVAL (XEXP (addr, 1));
1495 addr = XEXP (addr, 0);
1496 }
1497
1498 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1499 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1500 }
1501
1502 /* On the Alpha, all (non-symbolic) constants except zero go into
1503 a floating-point register via memory. Note that we cannot
1504 return anything that is not a subset of CLASS, and that some
1505 symbolic constants cannot be dropped to memory. */
1506
1507 enum reg_class
1508 alpha_preferred_reload_class(rtx x, enum reg_class class)
1509 {
1510 /* Zero is present in any register class. */
1511 if (x == CONST0_RTX (GET_MODE (x)))
1512 return class;
1513
1514 /* These sorts of constants we can easily drop to memory. */
1515 if (GET_CODE (x) == CONST_INT
1516 || GET_CODE (x) == CONST_DOUBLE
1517 || GET_CODE (x) == CONST_VECTOR)
1518 {
1519 if (class == FLOAT_REGS)
1520 return NO_REGS;
1521 if (class == ALL_REGS)
1522 return GENERAL_REGS;
1523 return class;
1524 }
1525
1526 /* All other kinds of constants should not (and in the case of HIGH
1527 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1528 secondary reload. */
1529 if (CONSTANT_P (x))
1530 return (class == ALL_REGS ? GENERAL_REGS : class);
1531
1532 return class;
1533 }
1534
1535 /* Inform reload about cases where moving X with a mode MODE to a register in
1536 CLASS requires an extra scratch or immediate register. Return the class
1537 needed for the immediate register. */
1538
1539 static enum reg_class
1540 alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
1541 enum machine_mode mode, secondary_reload_info *sri)
1542 {
1543 /* Loading and storing HImode or QImode values to and from memory
1544 usually requires a scratch register. */
1545 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1546 {
1547 if (any_memory_operand (x, mode))
1548 {
1549 if (in_p)
1550 {
1551 if (!aligned_memory_operand (x, mode))
1552 sri->icode = reload_in_optab[mode];
1553 }
1554 else
1555 sri->icode = reload_out_optab[mode];
1556 return NO_REGS;
1557 }
1558 }
1559
1560 /* We also cannot do integral arithmetic into FP regs, as might result
1561 from register elimination into a DImode fp register. */
1562 if (class == FLOAT_REGS)
1563 {
1564 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1565 return GENERAL_REGS;
1566 if (in_p && INTEGRAL_MODE_P (mode)
1567 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1568 return GENERAL_REGS;
1569 }
1570
1571 return NO_REGS;
1572 }
1573 \f
1574 /* Subfunction of the following function. Update the flags of any MEM
1575 found in part of X. */
1576
1577 static int
1578 alpha_set_memflags_1 (rtx *xp, void *data)
1579 {
1580 rtx x = *xp, orig = (rtx) data;
1581
1582 if (GET_CODE (x) != MEM)
1583 return 0;
1584
1585 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1586 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1587 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1588 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1589 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1590
1591 /* Sadly, we cannot use alias sets because the extra aliasing
1592 produced by the AND interferes. Given that two-byte quantities
1593 are the only thing we would be able to differentiate anyway,
1594 there does not seem to be any point in convoluting the early
1595 out of the alias check. */
1596
1597 return -1;
1598 }
1599
1600 /* Given INSN, which is an INSN list or the PATTERN of a single insn
1601 generated to perform a memory operation, look for any MEMs in either
1602 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1603 volatile flags from REF into each of the MEMs found. If REF is not
1604 a MEM, don't do anything. */
1605
1606 void
1607 alpha_set_memflags (rtx insn, rtx ref)
1608 {
1609 rtx *base_ptr;
1610
1611 if (GET_CODE (ref) != MEM)
1612 return;
1613
1614 /* This is only called from alpha.md, after having had something
1615 generated from one of the insn patterns. So if everything is
1616 zero, the pattern is already up-to-date. */
1617 if (!MEM_VOLATILE_P (ref)
1618 && !MEM_IN_STRUCT_P (ref)
1619 && !MEM_SCALAR_P (ref)
1620 && !MEM_NOTRAP_P (ref)
1621 && !MEM_READONLY_P (ref))
1622 return;
1623
1624 if (INSN_P (insn))
1625 base_ptr = &PATTERN (insn);
1626 else
1627 base_ptr = &insn;
1628 for_each_rtx (base_ptr, alpha_set_memflags_1, (void *) ref);
1629 }
1630 \f
1631 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1632 int, bool);
1633
1634 /* Internal routine for alpha_emit_set_const to check for N or below insns.
1635 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1636 and return pc_rtx if successful. */
1637
1638 static rtx
1639 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1640 HOST_WIDE_INT c, int n, bool no_output)
1641 {
1642 HOST_WIDE_INT new;
1643 int i, bits;
1644 /* Use a pseudo if highly optimizing and still generating RTL. */
1645 rtx subtarget
1646 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1647 rtx temp, insn;
1648
1649 /* If this is a sign-extended 32-bit constant, we can do this in at most
1650 three insns, so do it if we have enough insns left. We always have
1651 a sign-extended 32-bit constant when compiling on a narrow machine. */
1652
1653 if (HOST_BITS_PER_WIDE_INT != 64
1654 || c >> 31 == -1 || c >> 31 == 0)
1655 {
1656 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1657 HOST_WIDE_INT tmp1 = c - low;
1658 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1659 HOST_WIDE_INT extra = 0;
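	/* For example, C = 0x1234abcd gives LOW = -0x5433 and HIGH = 0x1235,
	   so the value is built as 0x1235 << 16 followed by an add of -0x5433
	   (an ldah/lda pair); EXTRA is only nonzero when HIGH would come out
	   negative for a non-negative C.  */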
1660
1661 /* If HIGH will be interpreted as negative but the constant is
1662 positive, we must adjust it to do two ldah insns. */
1663
1664 if ((high & 0x8000) != 0 && c >= 0)
1665 {
1666 extra = 0x4000;
1667 tmp1 -= 0x40000000;
1668 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1669 }
1670
1671 if (c == low || (low == 0 && extra == 0))
1672 {
1673 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1674 but that meant that we can't handle INT_MIN on 32-bit machines
1675 (like NT/Alpha), because we recurse indefinitely through
1676 emit_move_insn to gen_movdi. So instead, since we know exactly
1677 what we want, create it explicitly. */
1678
1679 if (no_output)
1680 return pc_rtx;
1681 if (target == NULL)
1682 target = gen_reg_rtx (mode);
1683 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1684 return target;
1685 }
1686 else if (n >= 2 + (extra != 0))
1687 {
1688 if (no_output)
1689 return pc_rtx;
1690 if (!can_create_pseudo_p ())
1691 {
1692 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1693 temp = target;
1694 }
1695 else
1696 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1697 subtarget, mode);
1698
1699 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1700 This means that if we go through expand_binop, we'll try to
1701 generate extensions, etc, which will require new pseudos, which
1702 will fail during some split phases. The SImode add patterns
1703 still exist, but are not named. So build the insns by hand. */
1704
1705 if (extra != 0)
1706 {
1707 if (! subtarget)
1708 subtarget = gen_reg_rtx (mode);
1709 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1710 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1711 emit_insn (insn);
1712 temp = subtarget;
1713 }
1714
1715 if (target == NULL)
1716 target = gen_reg_rtx (mode);
1717 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1718 insn = gen_rtx_SET (VOIDmode, target, insn);
1719 emit_insn (insn);
1720 return target;
1721 }
1722 }
1723
1724 /* If we couldn't do it that way, try some other methods. But if we have
1725 no instructions left, don't bother. Likewise, if this is SImode and
1726 we can't make pseudos, we can't do anything since the expand_binop
1727 and expand_unop calls will widen and try to make pseudos. */
1728
1729 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1730 return 0;
1731
1732 /* Next, see if we can load a related constant and then shift and possibly
1733 negate it to get the constant we want. Try this once each increasing
1734 numbers of insns. */
1735
1736 for (i = 1; i < n; i++)
1737 {
1738 /* First, see if, minus some low bits, we have an easy load of the
1739 high bits. */
1740
1741 new = ((c & 0xffff) ^ 0x8000) - 0x8000;
1742 if (new != 0)
1743 {
1744 temp = alpha_emit_set_const (subtarget, mode, c - new, i, no_output);
1745 if (temp)
1746 {
1747 if (no_output)
1748 return temp;
1749 return expand_binop (mode, add_optab, temp, GEN_INT (new),
1750 target, 0, OPTAB_WIDEN);
1751 }
1752 }
1753
1754 /* Next try complementing. */
1755 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1756 if (temp)
1757 {
1758 if (no_output)
1759 return temp;
1760 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1761 }
1762
1763 /* Next try to form a constant and do a left shift. We can do this
1764 if some low-order bits are zero; the exact_log2 call below tells
1765 us that information. The bits we are shifting out could be any
1766 value, but here we'll just try the 0- and sign-extended forms of
1767 the constant. To try to increase the chance of having the same
1768 constant in more than one insn, start at the highest number of
1769 bits to shift, but try all possibilities in case a ZAPNOT will
1770 be useful. */
1771
1772 bits = exact_log2 (c & -c);
1773 if (bits > 0)
1774 for (; bits > 0; bits--)
1775 {
1776 new = c >> bits;
1777 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1778 if (!temp && c < 0)
1779 {
1780 new = (unsigned HOST_WIDE_INT)c >> bits;
1781 temp = alpha_emit_set_const (subtarget, mode, new,
1782 i, no_output);
1783 }
1784 if (temp)
1785 {
1786 if (no_output)
1787 return temp;
1788 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1789 target, 0, OPTAB_WIDEN);
1790 }
1791 }
1792
1793 /* Now try high-order zero bits. Here we try the shifted-in bits as
1794 all zero and all ones. Be careful to avoid shifting outside the
1795 mode and to avoid shifting outside the host wide int size. */
1796 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1797 confuse the recursive call and set all of the high 32 bits. */
1798
1799 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1800 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1801 if (bits > 0)
1802 for (; bits > 0; bits--)
1803 {
1804 new = c << bits;
1805 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1806 if (!temp)
1807 {
1808 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1809 temp = alpha_emit_set_const (subtarget, mode, new,
1810 i, no_output);
1811 }
1812 if (temp)
1813 {
1814 if (no_output)
1815 return temp;
1816 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1817 target, 1, OPTAB_WIDEN);
1818 }
1819 }
1820
1821 /* Now try high-order 1 bits. We get that with a sign-extension.
1822 But one bit isn't enough here. Be careful to avoid shifting outside
1823 the mode and to avoid shifting outside the host wide int size. */
1824
1825 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1826 - floor_log2 (~ c) - 2);
1827 if (bits > 0)
1828 for (; bits > 0; bits--)
1829 {
1830 new = c << bits;
1831 temp = alpha_emit_set_const (subtarget, mode, new, i, no_output);
1832 if (!temp)
1833 {
1834 new = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1835 temp = alpha_emit_set_const (subtarget, mode, new,
1836 i, no_output);
1837 }
1838 if (temp)
1839 {
1840 if (no_output)
1841 return temp;
1842 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1843 target, 0, OPTAB_WIDEN);
1844 }
1845 }
1846 }
1847
1848 #if HOST_BITS_PER_WIDE_INT == 64
1849 /* Finally, see if we can load a value into the target that is the same as the
1850 constant except that all bytes that are 0 are changed to be 0xff. If we
1851 can, then we can do a ZAPNOT to obtain the desired constant. */
1852
1853 new = c;
1854 for (i = 0; i < 64; i += 8)
1855 if ((new & ((HOST_WIDE_INT) 0xff << i)) == 0)
1856 new |= (HOST_WIDE_INT) 0xff << i;
1857
1858 /* We are only called for SImode and DImode. If this is SImode, ensure that
1859 we are sign extended to a full word. */
1860
1861 if (mode == SImode)
1862 new = ((new & 0xffffffff) ^ 0x80000000) - 0x80000000;
1863
1864 if (new != c)
1865 {
1866 temp = alpha_emit_set_const (subtarget, mode, new, n - 1, no_output);
1867 if (temp)
1868 {
1869 if (no_output)
1870 return temp;
1871 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new),
1872 target, 0, OPTAB_WIDEN);
1873 }
1874 }
1875 #endif
1876
1877 return 0;
1878 }
1879
1880 /* Try to output insns to set TARGET equal to the constant C if it can be
1881 done in less than N insns. Do all computations in MODE. Returns the place
1882 where the output has been placed if it can be done and the insns have been
1883 emitted. If it would take more than N insns, zero is returned and no
1884 insns and emitted. */
1885
1886 static rtx
1887 alpha_emit_set_const (rtx target, enum machine_mode mode,
1888 HOST_WIDE_INT c, int n, bool no_output)
1889 {
1890 enum machine_mode orig_mode = mode;
1891 rtx orig_target = target;
1892 rtx result = 0;
1893 int i;
1894
1895 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1896 can't load this constant in one insn, do this in DImode. */
1897 if (!can_create_pseudo_p () && mode == SImode
1898 && GET_CODE (target) == REG && REGNO (target) < FIRST_PSEUDO_REGISTER)
1899 {
1900 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1901 if (result)
1902 return result;
1903
1904 target = no_output ? NULL : gen_lowpart (DImode, target);
1905 mode = DImode;
1906 }
1907 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1908 {
1909 target = no_output ? NULL : gen_lowpart (DImode, target);
1910 mode = DImode;
1911 }
1912
1913 /* Try 1 insn, then 2, then up to N. */
1914 for (i = 1; i <= n; i++)
1915 {
1916 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1917 if (result)
1918 {
1919 rtx insn, set;
1920
1921 if (no_output)
1922 return result;
1923
1924 insn = get_last_insn ();
1925 set = single_set (insn);
1926 if (! CONSTANT_P (SET_SRC (set)))
1927 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1928 break;
1929 }
1930 }
1931
1932 /* Allow for the case where we changed the mode of TARGET. */
1933 if (result)
1934 {
1935 if (result == target)
1936 result = orig_target;
1937 else if (mode != orig_mode)
1938 result = gen_lowpart (orig_mode, result);
1939 }
1940
1941 return result;
1942 }
1943
1944 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1945 fall back to a straightforward decomposition. We do this to avoid
1946 exponential run times encountered when looking for longer sequences
1947 with alpha_emit_set_const. */
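/* In other words, the constant is rebuilt as (((d4 + d3) << 32) + d2) + d1,
   where d1 and d3 are sign-extended 16-bit pieces (lda-style) and d2 and
   d4 are sign-extended multiples of 0x10000 (ldah-style).  For illustration,
   on a 64-bit host, c1 = 0x123456789abcdef0 decomposes as d1 = -0x2110,
   d2 = -0x65430000, d3 = 0x5679, d4 = 0x12340000, and
   ((0x12340000 + 0x5679) << 32) - 0x65430000 - 0x2110 == 0x123456789abcdef0.  */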
1948
1949 static rtx
1950 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1951 {
1952 HOST_WIDE_INT d1, d2, d3, d4;
1953
1954 /* Decompose the entire word */
1955 #if HOST_BITS_PER_WIDE_INT >= 64
1956 gcc_assert (c2 == -(c1 < 0));
1957 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1958 c1 -= d1;
1959 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1960 c1 = (c1 - d2) >> 32;
1961 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1962 c1 -= d3;
1963 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1964 gcc_assert (c1 == d4);
1965 #else
1966 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1967 c1 -= d1;
1968 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1969 gcc_assert (c1 == d2);
1970 c2 += (d2 < 0);
1971 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1972 c2 -= d3;
1973 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1974 gcc_assert (c2 == d4);
1975 #endif
1976
1977 /* Construct the high word */
1978 if (d4)
1979 {
1980 emit_move_insn (target, GEN_INT (d4));
1981 if (d3)
1982 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
1983 }
1984 else
1985 emit_move_insn (target, GEN_INT (d3));
1986
1987 /* Shift it into place */
1988 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
1989
1990 /* Add in the low bits. */
1991 if (d2)
1992 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
1993 if (d1)
1994 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
1995
1996 return target;
1997 }
1998
1999 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2000 the low 64 bits. */
2001
2002 static void
2003 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2004 {
2005 HOST_WIDE_INT i0, i1;
2006
2007 if (GET_CODE (x) == CONST_VECTOR)
2008 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2009
2010
2011 if (GET_CODE (x) == CONST_INT)
2012 {
2013 i0 = INTVAL (x);
2014 i1 = -(i0 < 0);
2015 }
2016 else if (HOST_BITS_PER_WIDE_INT >= 64)
2017 {
2018 i0 = CONST_DOUBLE_LOW (x);
2019 i1 = -(i0 < 0);
2020 }
2021 else
2022 {
2023 i0 = CONST_DOUBLE_LOW (x);
2024 i1 = CONST_DOUBLE_HIGH (x);
2025 }
2026
2027 *p0 = i0;
2028 *p1 = i1;
2029 }
2030
2031 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2032 are willing to load the value into a register via a move pattern.
2033 Normally this is all symbolic constants, integral constants that
2034 take three or fewer instructions, and floating-point zero. */
2035
2036 bool
2037 alpha_legitimate_constant_p (rtx x)
2038 {
2039 enum machine_mode mode = GET_MODE (x);
2040 HOST_WIDE_INT i0, i1;
2041
2042 switch (GET_CODE (x))
2043 {
2044 case CONST:
2045 case LABEL_REF:
2046 case HIGH:
2047 return true;
2048
2049 case SYMBOL_REF:
2050 /* TLS symbols are never valid. */
2051 return SYMBOL_REF_TLS_MODEL (x) == 0;
2052
2053 case CONST_DOUBLE:
2054 if (x == CONST0_RTX (mode))
2055 return true;
2056 if (FLOAT_MODE_P (mode))
2057 return false;
2058 goto do_integer;
2059
2060 case CONST_VECTOR:
2061 if (x == CONST0_RTX (mode))
2062 return true;
2063 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2064 return false;
2065 if (GET_MODE_SIZE (mode) != 8)
2066 return false;
2067 goto do_integer;
2068
2069 case CONST_INT:
2070 do_integer:
2071 if (TARGET_BUILD_CONSTANTS)
2072 return true;
2073 alpha_extract_integer (x, &i0, &i1);
2074 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2075 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2076 return false;
2077
2078 default:
2079 return false;
2080 }
2081 }
2082
2083 /* Operand 1 is known to be a constant, and should require more than one
2084 instruction to load. Emit that multi-part load. */
2085
2086 bool
2087 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2088 {
2089 HOST_WIDE_INT i0, i1;
2090 rtx temp = NULL_RTX;
2091
2092 alpha_extract_integer (operands[1], &i0, &i1);
2093
2094 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2095 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2096
2097 if (!temp && TARGET_BUILD_CONSTANTS)
2098 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2099
2100 if (temp)
2101 {
2102 if (!rtx_equal_p (operands[0], temp))
2103 emit_move_insn (operands[0], temp);
2104 return true;
2105 }
2106
2107 return false;
2108 }
2109
2110 /* Expand a move instruction; return true if all work is done.
2111 We don't handle non-bwx subword loads here. */
2112
2113 bool
2114 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2115 {
2116 /* If the output is not a register, the input must be. */
2117 if (GET_CODE (operands[0]) == MEM
2118 && ! reg_or_0_operand (operands[1], mode))
2119 operands[1] = force_reg (mode, operands[1]);
2120
2121 /* Allow legitimize_address to perform some simplifications. */
2122 if (mode == Pmode && symbolic_operand (operands[1], mode))
2123 {
2124 rtx tmp;
2125
2126 tmp = alpha_legitimize_address (operands[1], operands[0], mode);
2127 if (tmp)
2128 {
2129 if (tmp == operands[0])
2130 return true;
2131 operands[1] = tmp;
2132 return false;
2133 }
2134 }
2135
2136 /* Early out for non-constants and valid constants. */
2137 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2138 return false;
2139
2140 /* Split large integers. */
2141 if (GET_CODE (operands[1]) == CONST_INT
2142 || GET_CODE (operands[1]) == CONST_DOUBLE
2143 || GET_CODE (operands[1]) == CONST_VECTOR)
2144 {
2145 if (alpha_split_const_mov (mode, operands))
2146 return true;
2147 }
2148
2149 /* Otherwise we've nothing left but to drop the thing to memory. */
2150 operands[1] = force_const_mem (mode, operands[1]);
2151 if (reload_in_progress)
2152 {
2153 emit_move_insn (operands[0], XEXP (operands[1], 0));
2154 operands[1] = replace_equiv_address (operands[1], operands[0]);
2155 }
2156 else
2157 operands[1] = validize_mem (operands[1]);
2158 return false;
2159 }
2160
2161 /* Expand a non-bwx QImode or HImode move instruction;
2162 return true if all work is done. */
2163
2164 bool
2165 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2166 {
2167 rtx seq;
2168
2169 /* If the output is not a register, the input must be. */
2170 if (MEM_P (operands[0]))
2171 operands[1] = force_reg (mode, operands[1]);
2172
2173 /* Handle four memory cases, unaligned and aligned for either the input
2174 or the output. The only case where we can be called during reload is
2175 for aligned loads; all other cases require temporaries. */
2176
2177 if (any_memory_operand (operands[1], mode))
2178 {
2179 if (aligned_memory_operand (operands[1], mode))
2180 {
2181 if (reload_in_progress)
2182 {
2183 if (mode == QImode)
2184 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2185 else
2186 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2187 emit_insn (seq);
2188 }
2189 else
2190 {
2191 rtx aligned_mem, bitnum;
2192 rtx scratch = gen_reg_rtx (SImode);
2193 rtx subtarget;
2194 bool copyout;
2195
2196 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2197
2198 subtarget = operands[0];
2199 if (GET_CODE (subtarget) == REG)
2200 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2201 else
2202 subtarget = gen_reg_rtx (DImode), copyout = true;
2203
2204 if (mode == QImode)
2205 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2206 bitnum, scratch);
2207 else
2208 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2209 bitnum, scratch);
2210 emit_insn (seq);
2211
2212 if (copyout)
2213 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2214 }
2215 }
2216 else
2217 {
2218 /* Don't pass these as parameters since that makes the generated
2219 code depend on parameter evaluation order which will cause
2220 bootstrap failures. */
2221
2222 rtx temp1, temp2, subtarget, ua;
2223 bool copyout;
2224
2225 temp1 = gen_reg_rtx (DImode);
2226 temp2 = gen_reg_rtx (DImode);
2227
2228 subtarget = operands[0];
2229 if (GET_CODE (subtarget) == REG)
2230 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2231 else
2232 subtarget = gen_reg_rtx (DImode), copyout = true;
2233
2234 ua = get_unaligned_address (operands[1]);
2235 if (mode == QImode)
2236 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2237 else
2238 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2239
2240 alpha_set_memflags (seq, operands[1]);
2241 emit_insn (seq);
2242
2243 if (copyout)
2244 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2245 }
2246 return true;
2247 }
2248
2249 if (any_memory_operand (operands[0], mode))
2250 {
2251 if (aligned_memory_operand (operands[0], mode))
2252 {
2253 rtx aligned_mem, bitnum;
2254 rtx temp1 = gen_reg_rtx (SImode);
2255 rtx temp2 = gen_reg_rtx (SImode);
2256
2257 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2258
2259 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2260 temp1, temp2));
2261 }
2262 else
2263 {
2264 rtx temp1 = gen_reg_rtx (DImode);
2265 rtx temp2 = gen_reg_rtx (DImode);
2266 rtx temp3 = gen_reg_rtx (DImode);
2267 rtx ua = get_unaligned_address (operands[0]);
2268
2269 if (mode == QImode)
2270 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2271 else
2272 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2273
2274 alpha_set_memflags (seq, operands[0]);
2275 emit_insn (seq);
2276 }
2277 return true;
2278 }
2279
2280 return false;
2281 }
2282
2283 /* Implement the movmisalign patterns. One of the operands is a memory
2284 that is not naturally aligned. Emit instructions to load it. */
2285
2286 void
2287 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2288 {
2289 /* Honor misaligned loads, for those we promised to do so. */
2290 if (MEM_P (operands[1]))
2291 {
2292 rtx tmp;
2293
2294 if (register_operand (operands[0], mode))
2295 tmp = operands[0];
2296 else
2297 tmp = gen_reg_rtx (mode);
2298
2299 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2300 if (tmp != operands[0])
2301 emit_move_insn (operands[0], tmp);
2302 }
2303 else if (MEM_P (operands[0]))
2304 {
2305 if (!reg_or_0_operand (operands[1], mode))
2306 operands[1] = force_reg (mode, operands[1]);
2307 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2308 }
2309 else
2310 gcc_unreachable ();
2311 }
2312
2313 /* Generate an unsigned DImode to FP conversion. This is the same code
2314 optabs would emit if we didn't have TFmode patterns.
2315
2316 For SFmode, this is the only construction I've found that can pass
2317 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2318 intermediates will work, because you'll get intermediate rounding
2319 that ruins the end result. Some of this could be fixed by turning
2320 on round-to-positive-infinity, but that requires diddling the fpsr,
2321 which kills performance. I tried turning this around and converting
2322 to a negative number, so that I could turn on /m, but either I did
2323 it wrong or there's something else, because I wound up with the exact
2324 same single-bit error. There is a branch-less form of this same code:
2325
2326 srl $16,1,$1
2327 and $16,1,$2
2328 cmplt $16,0,$3
2329 or $1,$2,$2
2330 cmovge $16,$16,$2
2331 itoft $3,$f10
2332 itoft $2,$f11
2333 cvtqs $f11,$f11
2334 adds $f11,$f11,$f0
2335 fcmoveq $f10,$f11,$f0
2336
2337 I'm not using it because it's the same number of instructions as
2338 this branch-full form, and it has more serialized long latency
2339 instructions on the critical path.
2340
2341 For DFmode, we can avoid rounding errors by breaking up the word
2342 into two pieces, converting them separately, and adding them back:
2343
2344 LC0: .long 0,0x5f800000
2345
2346 itoft $16,$f11
2347 lda $2,LC0
2348 cmplt $16,0,$1
2349 cpyse $f11,$f31,$f10
2350 cpyse $f31,$f11,$f11
2351 s4addq $1,$2,$1
2352 lds $f12,0($1)
2353 cvtqt $f10,$f10
2354 cvtqt $f11,$f11
2355 addt $f12,$f10,$f0
2356 addt $f0,$f11,$f0
2357
2358 This doesn't seem to be a clear-cut win over the optabs form.
2359 It probably all depends on the distribution of numbers being
2360 converted -- in the optabs form, all but high-bit-set has a
2361 much lower minimum execution time. */
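/* In rough C terms, the sequence emitted below computes:

       if ((HOST_WIDE_INT) in >= 0)
         out = (FP) in;
       else
         {
           i0 = ((unsigned HOST_WIDE_INT) in >> 1) | (in & 1);
           f0 = (FP) i0;
           out = f0 + f0;
         }

   where FP stands for the target float mode; or-ing the low bit into i0
   keeps it around as a sticky bit, so that halving, converting, and then
   doubling still rounds correctly.  */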
2362
2363 void
2364 alpha_emit_floatuns (rtx operands[2])
2365 {
2366 rtx neglab, donelab, i0, i1, f0, in, out;
2367 enum machine_mode mode;
2368
2369 out = operands[0];
2370 in = force_reg (DImode, operands[1]);
2371 mode = GET_MODE (out);
2372 neglab = gen_label_rtx ();
2373 donelab = gen_label_rtx ();
2374 i0 = gen_reg_rtx (DImode);
2375 i1 = gen_reg_rtx (DImode);
2376 f0 = gen_reg_rtx (mode);
2377
2378 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2379
2380 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2381 emit_jump_insn (gen_jump (donelab));
2382 emit_barrier ();
2383
2384 emit_label (neglab);
2385
2386 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2387 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2388 emit_insn (gen_iordi3 (i0, i0, i1));
2389 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2390 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2391
2392 emit_label (donelab);
2393 }
2394
2395 /* Generate the comparison for a conditional branch. */
2396
2397 rtx
2398 alpha_emit_conditional_branch (enum rtx_code code)
2399 {
2400 enum rtx_code cmp_code, branch_code;
2401 enum machine_mode cmp_mode, branch_mode = VOIDmode;
2402 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2403 rtx tem;
2404
2405 if (alpha_compare.fp_p && GET_MODE (op0) == TFmode)
2406 {
2407 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2408 op1 = const0_rtx;
2409 alpha_compare.fp_p = 0;
2410 }
2411
2412 /* The general case: fold the comparison code to the types of compares
2413 that we have, choosing the branch as necessary. */
2414 switch (code)
2415 {
2416 case EQ: case LE: case LT: case LEU: case LTU:
2417 case UNORDERED:
2418 /* We have these compares: */
2419 cmp_code = code, branch_code = NE;
2420 break;
2421
2422 case NE:
2423 case ORDERED:
2424 /* These must be reversed. */
2425 cmp_code = reverse_condition (code), branch_code = EQ;
2426 break;
2427
2428 case GE: case GT: case GEU: case GTU:
2429 /* For FP, we swap them, for INT, we reverse them. */
2430 if (alpha_compare.fp_p)
2431 {
2432 cmp_code = swap_condition (code);
2433 branch_code = NE;
2434 tem = op0, op0 = op1, op1 = tem;
2435 }
2436 else
2437 {
2438 cmp_code = reverse_condition (code);
2439 branch_code = EQ;
2440 }
2441 break;
2442
2443 default:
2444 gcc_unreachable ();
2445 }
2446
2447 if (alpha_compare.fp_p)
2448 {
2449 cmp_mode = DFmode;
2450 if (flag_unsafe_math_optimizations)
2451 {
2452 /* When we are not as concerned about non-finite values, and we
2453 are comparing against zero, we can branch directly. */
2454 if (op1 == CONST0_RTX (DFmode))
2455 cmp_code = UNKNOWN, branch_code = code;
2456 else if (op0 == CONST0_RTX (DFmode))
2457 {
2458 /* Undo the swap we probably did just above. */
2459 tem = op0, op0 = op1, op1 = tem;
2460 branch_code = swap_condition (cmp_code);
2461 cmp_code = UNKNOWN;
2462 }
2463 }
2464 else
2465 {
2466 /* ??? We mark the branch mode to be CCmode to prevent the
2467 compare and branch from being combined, since the compare
2468 insn follows IEEE rules that the branch does not. */
2469 branch_mode = CCmode;
2470 }
2471 }
2472 else
2473 {
2474 cmp_mode = DImode;
2475
2476 /* The following optimizations are only for signed compares. */
2477 if (code != LEU && code != LTU && code != GEU && code != GTU)
2478 {
2479 /* Whee. Compare and branch against 0 directly. */
2480 if (op1 == const0_rtx)
2481 cmp_code = UNKNOWN, branch_code = code;
2482
2483 /* If the constant doesn't fit into an immediate, but can
2484 be generated by lda/ldah, we adjust the argument and
2485 compare against zero, so we can use beq/bne directly. */
2486 /* ??? Don't do this when comparing against symbols, otherwise
2487 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2488 be declared false out of hand (at least for non-weak). */
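/* For example, x == 0x1234 does not fit the 8-bit compare immediate,
   but -0x1234 fits lda's signed 16 bits, so we emit t = x + -0x1234
   and branch with beq t.  */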
2489 else if (GET_CODE (op1) == CONST_INT
2490 && (code == EQ || code == NE)
2491 && !(symbolic_operand (op0, VOIDmode)
2492 || (GET_CODE (op0) == REG && REG_POINTER (op0))))
2493 {
2494 rtx n_op1 = GEN_INT (-INTVAL (op1));
2495
2496 if (! satisfies_constraint_I (op1)
2497 && (satisfies_constraint_K (n_op1)
2498 || satisfies_constraint_L (n_op1)))
2499 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2500 }
2501 }
2502
2503 if (!reg_or_0_operand (op0, DImode))
2504 op0 = force_reg (DImode, op0);
2505 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2506 op1 = force_reg (DImode, op1);
2507 }
2508
2509 /* Emit an initial compare instruction, if necessary. */
2510 tem = op0;
2511 if (cmp_code != UNKNOWN)
2512 {
2513 tem = gen_reg_rtx (cmp_mode);
2514 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2515 }
2516
2517 /* Zero the operands. */
2518 memset (&alpha_compare, 0, sizeof (alpha_compare));
2519
2520 /* Return the branch comparison. */
2521 return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
2522 }
2523
2524 /* Certain simplifications can be done to make invalid setcc operations
2525 valid. Return the final comparison, or NULL if we can't work. */
2526
2527 rtx
2528 alpha_emit_setcc (enum rtx_code code)
2529 {
2530 enum rtx_code cmp_code;
2531 rtx op0 = alpha_compare.op0, op1 = alpha_compare.op1;
2532 int fp_p = alpha_compare.fp_p;
2533 rtx tmp;
2534
2535 /* Zero the operands. */
2536 memset (&alpha_compare, 0, sizeof (alpha_compare));
2537
2538 if (fp_p && GET_MODE (op0) == TFmode)
2539 {
2540 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2541 op1 = const0_rtx;
2542 fp_p = 0;
2543 }
2544
2545 if (fp_p && !TARGET_FIX)
2546 return NULL_RTX;
2547
2548 /* The general case: fold the comparison code to the types of compares
2549 that we have, choosing the branch as necessary. */
2550
2551 cmp_code = UNKNOWN;
2552 switch (code)
2553 {
2554 case EQ: case LE: case LT: case LEU: case LTU:
2555 case UNORDERED:
2556 /* We have these compares. */
2557 if (fp_p)
2558 cmp_code = code, code = NE;
2559 break;
2560
2561 case NE:
2562 if (!fp_p && op1 == const0_rtx)
2563 break;
2564 /* FALLTHRU */
2565
2566 case ORDERED:
2567 cmp_code = reverse_condition (code);
2568 code = EQ;
2569 break;
2570
2571 case GE: case GT: case GEU: case GTU:
2572 /* These normally need swapping, but for integer zero we have
2573 special patterns that recognize swapped operands. */
2574 if (!fp_p && op1 == const0_rtx)
2575 break;
2576 code = swap_condition (code);
2577 if (fp_p)
2578 cmp_code = code, code = NE;
2579 tmp = op0, op0 = op1, op1 = tmp;
2580 break;
2581
2582 default:
2583 gcc_unreachable ();
2584 }
2585
2586 if (!fp_p)
2587 {
2588 if (!register_operand (op0, DImode))
2589 op0 = force_reg (DImode, op0);
2590 if (!reg_or_8bit_operand (op1, DImode))
2591 op1 = force_reg (DImode, op1);
2592 }
2593
2594 /* Emit an initial compare instruction, if necessary. */
2595 if (cmp_code != UNKNOWN)
2596 {
2597 enum machine_mode mode = fp_p ? DFmode : DImode;
2598
2599 tmp = gen_reg_rtx (mode);
2600 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2601 gen_rtx_fmt_ee (cmp_code, mode, op0, op1)));
2602
2603 op0 = fp_p ? gen_lowpart (DImode, tmp) : tmp;
2604 op1 = const0_rtx;
2605 }
2606
2607 /* Return the setcc comparison. */
2608 return gen_rtx_fmt_ee (code, DImode, op0, op1);
2609 }
2610
2611
2612 /* Rewrite a comparison against zero CMP of the form
2613 (CODE (cc0) (const_int 0)) so it can be written validly in
2614 a conditional move (if_then_else CMP ...).
2615 If both of the operands that set cc0 are nonzero we must emit
2616 an insn to perform the compare (it can't be done within
2617 the conditional move). */
2618
2619 rtx
2620 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2621 {
2622 enum rtx_code code = GET_CODE (cmp);
2623 enum rtx_code cmov_code = NE;
2624 rtx op0 = alpha_compare.op0;
2625 rtx op1 = alpha_compare.op1;
2626 int fp_p = alpha_compare.fp_p;
2627 enum machine_mode cmp_mode
2628 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2629 enum machine_mode cmp_op_mode = fp_p ? DFmode : DImode;
2630 enum machine_mode cmov_mode = VOIDmode;
2631 int local_fast_math = flag_unsafe_math_optimizations;
2632 rtx tem;
2633
2634 /* Zero the operands. */
2635 memset (&alpha_compare, 0, sizeof (alpha_compare));
2636
2637 if (fp_p != FLOAT_MODE_P (mode))
2638 {
2639 enum rtx_code cmp_code;
2640
2641 if (! TARGET_FIX)
2642 return 0;
2643
2644 /* If we have fp<->int register move instructions, do a cmov by
2645 performing the comparison in fp registers, and move the
2646 zero/nonzero value to integer registers, where we can then
2647 use a normal cmov, or vice-versa. */
2648
2649 switch (code)
2650 {
2651 case EQ: case LE: case LT: case LEU: case LTU:
2652 /* We have these compares. */
2653 cmp_code = code, code = NE;
2654 break;
2655
2656 case NE:
2657 /* This must be reversed. */
2658 cmp_code = EQ, code = EQ;
2659 break;
2660
2661 case GE: case GT: case GEU: case GTU:
2662 /* These normally need swapping, but for integer zero we have
2663 special patterns that recognize swapped operands. */
2664 if (!fp_p && op1 == const0_rtx)
2665 cmp_code = code, code = NE;
2666 else
2667 {
2668 cmp_code = swap_condition (code);
2669 code = NE;
2670 tem = op0, op0 = op1, op1 = tem;
2671 }
2672 break;
2673
2674 default:
2675 gcc_unreachable ();
2676 }
2677
2678 tem = gen_reg_rtx (cmp_op_mode);
2679 emit_insn (gen_rtx_SET (VOIDmode, tem,
2680 gen_rtx_fmt_ee (cmp_code, cmp_op_mode,
2681 op0, op1)));
2682
2683 cmp_mode = cmp_op_mode = fp_p ? DImode : DFmode;
2684 op0 = gen_lowpart (cmp_op_mode, tem);
2685 op1 = CONST0_RTX (cmp_op_mode);
2686 fp_p = !fp_p;
2687 local_fast_math = 1;
2688 }
2689
2690 /* We may be able to use a conditional move directly.
2691 This avoids emitting spurious compares. */
2692 if (signed_comparison_operator (cmp, VOIDmode)
2693 && (!fp_p || local_fast_math)
2694 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2695 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2696
2697 /* We can't put the comparison inside the conditional move;
2698 emit a compare instruction and put that inside the
2699 conditional move. Make sure we emit only comparisons we have;
2700 swap or reverse as necessary. */
2701
2702 if (!can_create_pseudo_p ())
2703 return NULL_RTX;
2704
2705 switch (code)
2706 {
2707 case EQ: case LE: case LT: case LEU: case LTU:
2708 /* We have these compares: */
2709 break;
2710
2711 case NE:
2712 /* This must be reversed. */
2713 code = reverse_condition (code);
2714 cmov_code = EQ;
2715 break;
2716
2717 case GE: case GT: case GEU: case GTU:
2718 /* These must be swapped. */
2719 if (op1 != CONST0_RTX (cmp_mode))
2720 {
2721 code = swap_condition (code);
2722 tem = op0, op0 = op1, op1 = tem;
2723 }
2724 break;
2725
2726 default:
2727 gcc_unreachable ();
2728 }
2729
2730 if (!fp_p)
2731 {
2732 if (!reg_or_0_operand (op0, DImode))
2733 op0 = force_reg (DImode, op0);
2734 if (!reg_or_8bit_operand (op1, DImode))
2735 op1 = force_reg (DImode, op1);
2736 }
2737
2738 /* ??? We mark the branch mode to be CCmode to prevent the compare
2739 and cmov from being combined, since the compare insn follows IEEE
2740 rules that the cmov does not. */
2741 if (fp_p && !local_fast_math)
2742 cmov_mode = CCmode;
2743
2744 tem = gen_reg_rtx (cmp_op_mode);
2745 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
2746 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
2747 }
2748
2749 /* Simplify a conditional move of two constants into a setcc with
2750 arithmetic. This is done with a splitter since combine would
2751 just undo the work if done during code generation. It also catches
2752 cases we wouldn't have before cse. */
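/* For example, (cond ? 5 : 1) has diff == 4: emit a 0/1 setcc t from the
   condition, then dest = t * 4 + 1, which a single s4addq (or s4addl for
   SImode) implements.  */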
2753
2754 int
2755 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2756 rtx t_rtx, rtx f_rtx)
2757 {
2758 HOST_WIDE_INT t, f, diff;
2759 enum machine_mode mode;
2760 rtx target, subtarget, tmp;
2761
2762 mode = GET_MODE (dest);
2763 t = INTVAL (t_rtx);
2764 f = INTVAL (f_rtx);
2765 diff = t - f;
2766
2767 if (((code == NE || code == EQ) && diff < 0)
2768 || (code == GE || code == GT))
2769 {
2770 code = reverse_condition (code);
2771 diff = t, t = f, f = diff;
2772 diff = t - f;
2773 }
2774
2775 subtarget = target = dest;
2776 if (mode != DImode)
2777 {
2778 target = gen_lowpart (DImode, dest);
2779 if (can_create_pseudo_p ())
2780 subtarget = gen_reg_rtx (DImode);
2781 else
2782 subtarget = target;
2783 }
2784 /* Below, we must be careful to use copy_rtx on target and subtarget
2785 in intermediate insns, as they may be a subreg rtx, which may not
2786 be shared. */
2787
2788 if (f == 0 && exact_log2 (diff) > 0
2789 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2790 viable over a longer latency cmove. On EV5, the E0 slot is a
2791 scarce resource, and on EV4 shift has the same latency as a cmove. */
2792 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2793 {
2794 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2795 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2796
2797 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2798 GEN_INT (exact_log2 (t)));
2799 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2800 }
2801 else if (f == 0 && t == -1)
2802 {
2803 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2804 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2805
2806 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2807 }
2808 else if (diff == 1 || diff == 4 || diff == 8)
2809 {
2810 rtx add_op;
2811
2812 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2813 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2814
2815 if (diff == 1)
2816 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2817 else
2818 {
2819 add_op = GEN_INT (f);
2820 if (sext_add_operand (add_op, mode))
2821 {
2822 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2823 GEN_INT (diff));
2824 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2825 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2826 }
2827 else
2828 return 0;
2829 }
2830 }
2831 else
2832 return 0;
2833
2834 return 1;
2835 }
2836 \f
2837 /* Look up the function X_floating library function name for the
2838 given operation. */
2839
2840 struct xfloating_op GTY(())
2841 {
2842 const enum rtx_code code;
2843 const char *const GTY((skip)) osf_func;
2844 const char *const GTY((skip)) vms_func;
2845 rtx libcall;
2846 };
2847
2848 static GTY(()) struct xfloating_op xfloating_ops[] =
2849 {
2850 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2851 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2852 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2853 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2854 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2855 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2856 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2857 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2858 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2859 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2860 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2861 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2862 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2863 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2864 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2865 };
2866
2867 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2868 {
2869 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2870 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2871 };
2872
2873 static rtx
2874 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2875 {
2876 struct xfloating_op *ops = xfloating_ops;
2877 long n = ARRAY_SIZE (xfloating_ops);
2878 long i;
2879
2880 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2881
2882 /* How irritating. Nothing to key off for the main table. */
2883 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2884 {
2885 ops = vax_cvt_ops;
2886 n = ARRAY_SIZE (vax_cvt_ops);
2887 }
2888
2889 for (i = 0; i < n; ++i, ++ops)
2890 if (ops->code == code)
2891 {
2892 rtx func = ops->libcall;
2893 if (!func)
2894 {
2895 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2896 ? ops->vms_func : ops->osf_func);
2897 ops->libcall = func;
2898 }
2899 return func;
2900 }
2901
2902 gcc_unreachable ();
2903 }
2904
2905 /* Most X_floating operations take the rounding mode as an argument.
2906 Compute that here. */
2907
2908 static int
2909 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2910 enum alpha_fp_rounding_mode round)
2911 {
2912 int mode;
2913
2914 switch (round)
2915 {
2916 case ALPHA_FPRM_NORM:
2917 mode = 2;
2918 break;
2919 case ALPHA_FPRM_MINF:
2920 mode = 1;
2921 break;
2922 case ALPHA_FPRM_CHOP:
2923 mode = 0;
2924 break;
2925 case ALPHA_FPRM_DYN:
2926 mode = 4;
2927 break;
2928 default:
2929 gcc_unreachable ();
2930
2931 /* XXX For reference, round to +inf is mode = 3. */
2932 }
2933
2934 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2935 mode |= 0x10000;
2936
2937 return mode;
2938 }
2939
2940 /* Emit an X_floating library function call.
2941
2942 Note that these functions do not follow normal calling conventions:
2943 TFmode arguments are passed in two integer registers (as opposed to
2944 indirect); TFmode return values appear in R16+R17.
2945
2946 FUNC is the function to call.
2947 TARGET is where the output belongs.
2948 OPERANDS are the inputs.
2949 NOPERANDS is the count of inputs.
2950 EQUIV is the expression equivalent for the function.
2951 */
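/* For example, a TFmode add passes its two TFmode inputs in $16-$17 and
   $18-$19, the rounding-mode argument in $20, and receives the TFmode
   result back in $16-$17.  */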
2952
2953 static void
2954 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2955 int noperands, rtx equiv)
2956 {
2957 rtx usage = NULL_RTX, tmp, reg;
2958 int regno = 16, i;
2959
2960 start_sequence ();
2961
2962 for (i = 0; i < noperands; ++i)
2963 {
2964 switch (GET_MODE (operands[i]))
2965 {
2966 case TFmode:
2967 reg = gen_rtx_REG (TFmode, regno);
2968 regno += 2;
2969 break;
2970
2971 case DFmode:
2972 reg = gen_rtx_REG (DFmode, regno + 32);
2973 regno += 1;
2974 break;
2975
2976 case VOIDmode:
2977 gcc_assert (GET_CODE (operands[i]) == CONST_INT);
2978 /* FALLTHRU */
2979 case DImode:
2980 reg = gen_rtx_REG (DImode, regno);
2981 regno += 1;
2982 break;
2983
2984 default:
2985 gcc_unreachable ();
2986 }
2987
2988 emit_move_insn (reg, operands[i]);
2989 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
2990 }
2991
2992 switch (GET_MODE (target))
2993 {
2994 case TFmode:
2995 reg = gen_rtx_REG (TFmode, 16);
2996 break;
2997 case DFmode:
2998 reg = gen_rtx_REG (DFmode, 32);
2999 break;
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, 0);
3002 break;
3003 default:
3004 gcc_unreachable ();
3005 }
3006
3007 tmp = gen_rtx_MEM (QImode, func);
3008 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3009 const0_rtx, const0_rtx));
3010 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3011 CONST_OR_PURE_CALL_P (tmp) = 1;
3012
3013 tmp = get_insns ();
3014 end_sequence ();
3015
3016 emit_libcall_block (tmp, target, reg, equiv);
3017 }
3018
3019 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3020
3021 void
3022 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3023 {
3024 rtx func;
3025 int mode;
3026 rtx out_operands[3];
3027
3028 func = alpha_lookup_xfloating_lib_func (code);
3029 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3030
3031 out_operands[0] = operands[1];
3032 out_operands[1] = operands[2];
3033 out_operands[2] = GEN_INT (mode);
3034 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3035 gen_rtx_fmt_ee (code, TFmode, operands[1],
3036 operands[2]));
3037 }
3038
3039 /* Emit an X_floating library function call for a comparison. */
3040
3041 static rtx
3042 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3043 {
3044 enum rtx_code cmp_code, res_code;
3045 rtx func, out, operands[2];
3046
3047 /* X_floating library comparison functions return
3048 -1 unordered
3049 0 false
3050 1 true
3051 Convert the compare against the raw return value. */
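/* For example, (a <= b) calls the LE routine and then tests result > 0,
   while UNORDERED calls the EQ routine and tests result < 0.  */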
3052
3053 cmp_code = *pcode;
3054 switch (cmp_code)
3055 {
3056 case UNORDERED:
3057 cmp_code = EQ;
3058 res_code = LT;
3059 break;
3060 case ORDERED:
3061 cmp_code = EQ;
3062 res_code = GE;
3063 break;
3064 case NE:
3065 res_code = NE;
3066 break;
3067 case EQ:
3068 case LT:
3069 case GT:
3070 case LE:
3071 case GE:
3072 res_code = GT;
3073 break;
3074 default:
3075 gcc_unreachable ();
3076 }
3077 *pcode = res_code;
3078
3079 func = alpha_lookup_xfloating_lib_func (cmp_code);
3080
3081 operands[0] = op0;
3082 operands[1] = op1;
3083 out = gen_reg_rtx (DImode);
3084
3085 /* ??? Strange mode for equiv because what's actually returned
3086 is -1,0,1, not a proper boolean value. */
3087 alpha_emit_xfloating_libcall (func, out, operands, 2,
3088 gen_rtx_fmt_ee (cmp_code, CCmode, op0, op1));
3089
3090 return out;
3091 }
3092
3093 /* Emit an X_floating library function call for a conversion. */
3094
3095 void
3096 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3097 {
3098 int noperands = 1, mode;
3099 rtx out_operands[2];
3100 rtx func;
3101 enum rtx_code code = orig_code;
3102
3103 if (code == UNSIGNED_FIX)
3104 code = FIX;
3105
3106 func = alpha_lookup_xfloating_lib_func (code);
3107
3108 out_operands[0] = operands[1];
3109
3110 switch (code)
3111 {
3112 case FIX:
3113 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3114 out_operands[1] = GEN_INT (mode);
3115 noperands = 2;
3116 break;
3117 case FLOAT_TRUNCATE:
3118 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3119 out_operands[1] = GEN_INT (mode);
3120 noperands = 2;
3121 break;
3122 default:
3123 break;
3124 }
3125
3126 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3127 gen_rtx_fmt_e (orig_code,
3128 GET_MODE (operands[0]),
3129 operands[1]));
3130 }
3131
3132 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3133 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3134 guarantee that the sequence
3135 set (OP[0] OP[2])
3136 set (OP[1] OP[3])
3137 is valid. Naturally, output operand ordering is little-endian.
3138 This is used by *movtf_internal and *movti_internal. */
3139
3140 void
3141 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3142 bool fixup_overlap)
3143 {
3144 switch (GET_CODE (operands[1]))
3145 {
3146 case REG:
3147 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3148 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3149 break;
3150
3151 case MEM:
3152 operands[3] = adjust_address (operands[1], DImode, 8);
3153 operands[2] = adjust_address (operands[1], DImode, 0);
3154 break;
3155
3156 case CONST_INT:
3157 case CONST_DOUBLE:
3158 gcc_assert (operands[1] == CONST0_RTX (mode));
3159 operands[2] = operands[3] = const0_rtx;
3160 break;
3161
3162 default:
3163 gcc_unreachable ();
3164 }
3165
3166 switch (GET_CODE (operands[0]))
3167 {
3168 case REG:
3169 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3170 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3171 break;
3172
3173 case MEM:
3174 operands[1] = adjust_address (operands[0], DImode, 8);
3175 operands[0] = adjust_address (operands[0], DImode, 0);
3176 break;
3177
3178 default:
3179 gcc_unreachable ();
3180 }
3181
3182 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3183 {
3184 rtx tmp;
3185 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3186 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3187 }
3188 }
3189
3190 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3191 op2 is a register containing the sign bit, operation is the
3192 logical operation to be performed. */
3193
3194 void
3195 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3196 {
3197 rtx high_bit = operands[2];
3198 rtx scratch;
3199 int move;
3200
3201 alpha_split_tmode_pair (operands, TFmode, false);
3202
3203 /* Detect three flavors of operand overlap. */
3204 move = 1;
3205 if (rtx_equal_p (operands[0], operands[2]))
3206 move = 0;
3207 else if (rtx_equal_p (operands[1], operands[2]))
3208 {
3209 if (rtx_equal_p (operands[0], high_bit))
3210 move = 2;
3211 else
3212 move = -1;
3213 }
3214
3215 if (move < 0)
3216 emit_move_insn (operands[0], operands[2]);
3217
3218 /* ??? If the destination overlaps both source tf and high_bit, then
3219 assume source tf is dead in its entirety and use the other half
3220 for a scratch register. Otherwise "scratch" is just the proper
3221 destination register. */
3222 scratch = operands[move < 2 ? 1 : 3];
3223
3224 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3225
3226 if (move > 0)
3227 {
3228 emit_move_insn (operands[0], operands[2]);
3229 if (move > 1)
3230 emit_move_insn (operands[1], scratch);
3231 }
3232 }
3233 \f
3234 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3235 unaligned data:
3236
3237 unsigned: signed:
3238 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3239 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3240 lda r3,X(r11) lda r3,X+2(r11)
3241 extwl r1,r3,r1 extql r1,r3,r1
3242 extwh r2,r3,r2 extqh r2,r3,r2
3243 or r1,r2,r1 or r1,r2,r1
3244 sra r1,48,r1
3245
3246 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3247 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3248 lda r3,X(r11) lda r3,X(r11)
3249 extll r1,r3,r1 extll r1,r3,r1
3250 extlh r2,r3,r2 extlh r2,r3,r2
3251 or r1,r2,r1 addl r1,r2,r1
3252
3253 quad: ldq_u r1,X(r11)
3254 ldq_u r2,X+7(r11)
3255 lda r3,X(r11)
3256 extql r1,r3,r1
3257 extqh r2,r3,r2
3258 or r1,r2,r1
3259 */
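/* The ext*l/ext*h pair only uses the low three bits of r3, so one code
   sequence handles any misalignment of X; the two ldq_u loads fetch the
   aligned quadwords that cover the field.  */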
3260
3261 void
3262 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3263 HOST_WIDE_INT ofs, int sign)
3264 {
3265 rtx meml, memh, addr, extl, exth, tmp, mema;
3266 enum machine_mode mode;
3267
3268 if (TARGET_BWX && size == 2)
3269 {
3270 meml = adjust_address (mem, QImode, ofs);
3271 memh = adjust_address (mem, QImode, ofs+1);
3272 if (BYTES_BIG_ENDIAN)
3273 tmp = meml, meml = memh, memh = tmp;
3274 extl = gen_reg_rtx (DImode);
3275 exth = gen_reg_rtx (DImode);
3276 emit_insn (gen_zero_extendqidi2 (extl, meml));
3277 emit_insn (gen_zero_extendqidi2 (exth, memh));
3278 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3279 NULL, 1, OPTAB_LIB_WIDEN);
3280 addr = expand_simple_binop (DImode, IOR, extl, exth,
3281 NULL, 1, OPTAB_LIB_WIDEN);
3282
3283 if (sign && GET_MODE (tgt) != HImode)
3284 {
3285 addr = gen_lowpart (HImode, addr);
3286 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3287 }
3288 else
3289 {
3290 if (GET_MODE (tgt) != DImode)
3291 addr = gen_lowpart (GET_MODE (tgt), addr);
3292 emit_move_insn (tgt, addr);
3293 }
3294 return;
3295 }
3296
3297 meml = gen_reg_rtx (DImode);
3298 memh = gen_reg_rtx (DImode);
3299 addr = gen_reg_rtx (DImode);
3300 extl = gen_reg_rtx (DImode);
3301 exth = gen_reg_rtx (DImode);
3302
3303 mema = XEXP (mem, 0);
3304 if (GET_CODE (mema) == LO_SUM)
3305 mema = force_reg (Pmode, mema);
3306
3307 /* AND addresses cannot be in any alias set, since they may implicitly
3308 alias surrounding code. Ideally we'd have some alias set that
3309 covered all types except those with alignment 8 or higher. */
3310
3311 tmp = change_address (mem, DImode,
3312 gen_rtx_AND (DImode,
3313 plus_constant (mema, ofs),
3314 GEN_INT (-8)));
3315 set_mem_alias_set (tmp, 0);
3316 emit_move_insn (meml, tmp);
3317
3318 tmp = change_address (mem, DImode,
3319 gen_rtx_AND (DImode,
3320 plus_constant (mema, ofs + size - 1),
3321 GEN_INT (-8)));
3322 set_mem_alias_set (tmp, 0);
3323 emit_move_insn (memh, tmp);
3324
3325 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3326 {
3327 emit_move_insn (addr, plus_constant (mema, -1));
3328
3329 emit_insn (gen_extqh_be (extl, meml, addr));
3330 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3331
3332 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3333 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3334 addr, 1, OPTAB_WIDEN);
3335 }
3336 else if (sign && size == 2)
3337 {
3338 emit_move_insn (addr, plus_constant (mema, ofs+2));
3339
3340 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3341 emit_insn (gen_extqh_le (exth, memh, addr));
3342
3343 /* We must use tgt here for the target. Alpha-vms port fails if we use
3344 addr for the target, because addr is marked as a pointer and combine
3345 knows that pointers are always sign-extended 32-bit values. */
3346 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3347 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3348 addr, 1, OPTAB_WIDEN);
3349 }
3350 else
3351 {
3352 if (WORDS_BIG_ENDIAN)
3353 {
3354 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3355 switch ((int) size)
3356 {
3357 case 2:
3358 emit_insn (gen_extwh_be (extl, meml, addr));
3359 mode = HImode;
3360 break;
3361
3362 case 4:
3363 emit_insn (gen_extlh_be (extl, meml, addr));
3364 mode = SImode;
3365 break;
3366
3367 case 8:
3368 emit_insn (gen_extqh_be (extl, meml, addr));
3369 mode = DImode;
3370 break;
3371
3372 default:
3373 gcc_unreachable ();
3374 }
3375 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3376 }
3377 else
3378 {
3379 emit_move_insn (addr, plus_constant (mema, ofs));
3380 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3381 switch ((int) size)
3382 {
3383 case 2:
3384 emit_insn (gen_extwh_le (exth, memh, addr));
3385 mode = HImode;
3386 break;
3387
3388 case 4:
3389 emit_insn (gen_extlh_le (exth, memh, addr));
3390 mode = SImode;
3391 break;
3392
3393 case 8:
3394 emit_insn (gen_extqh_le (exth, memh, addr));
3395 mode = DImode;
3396 break;
3397
3398 default:
3399 gcc_unreachable ();
3400 }
3401 }
3402
3403 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3404 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3405 sign, OPTAB_WIDEN);
3406 }
3407
3408 if (addr != tgt)
3409 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3410 }
3411
3412 /* Similarly, use ins and msk instructions to perform unaligned stores. */
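/* The strategy: load the two quadwords that enclose the destination,
   insert the source bytes into position (ins*l/ins*h), clear those same
   byte positions in the loaded words (msk*l/msk*h), OR the halves
   together, and store both quadwords back.  */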
3413
3414 void
3415 alpha_expand_unaligned_store (rtx dst, rtx src,
3416 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3417 {
3418 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3419
3420 if (TARGET_BWX && size == 2)
3421 {
3422 if (src != const0_rtx)
3423 {
3424 dstl = gen_lowpart (QImode, src);
3425 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3426 NULL, 1, OPTAB_LIB_WIDEN);
3427 dsth = gen_lowpart (QImode, dsth);
3428 }
3429 else
3430 dstl = dsth = const0_rtx;
3431
3432 meml = adjust_address (dst, QImode, ofs);
3433 memh = adjust_address (dst, QImode, ofs+1);
3434 if (BYTES_BIG_ENDIAN)
3435 addr = meml, meml = memh, memh = addr;
3436
3437 emit_move_insn (meml, dstl);
3438 emit_move_insn (memh, dsth);
3439 return;
3440 }
3441
3442 dstl = gen_reg_rtx (DImode);
3443 dsth = gen_reg_rtx (DImode);
3444 insl = gen_reg_rtx (DImode);
3445 insh = gen_reg_rtx (DImode);
3446
3447 dsta = XEXP (dst, 0);
3448 if (GET_CODE (dsta) == LO_SUM)
3449 dsta = force_reg (Pmode, dsta);
3450
3451 /* AND addresses cannot be in any alias set, since they may implicitly
3452 alias surrounding code. Ideally we'd have some alias set that
3453 covered all types except those with alignment 8 or higher. */
3454
3455 meml = change_address (dst, DImode,
3456 gen_rtx_AND (DImode,
3457 plus_constant (dsta, ofs),
3458 GEN_INT (-8)));
3459 set_mem_alias_set (meml, 0);
3460
3461 memh = change_address (dst, DImode,
3462 gen_rtx_AND (DImode,
3463 plus_constant (dsta, ofs + size - 1),
3464 GEN_INT (-8)));
3465 set_mem_alias_set (memh, 0);
3466
3467 emit_move_insn (dsth, memh);
3468 emit_move_insn (dstl, meml);
3469 if (WORDS_BIG_ENDIAN)
3470 {
3471 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3472
3473 if (src != const0_rtx)
3474 {
3475 switch ((int) size)
3476 {
3477 case 2:
3478 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3479 break;
3480 case 4:
3481 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3482 break;
3483 case 8:
3484 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3485 break;
3486 }
3487 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3488 GEN_INT (size*8), addr));
3489 }
3490
3491 switch ((int) size)
3492 {
3493 case 2:
3494 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3495 break;
3496 case 4:
3497 {
3498 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3499 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3500 break;
3501 }
3502 case 8:
3503 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3504 break;
3505 }
3506
3507 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3508 }
3509 else
3510 {
3511 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3512
3513 if (src != CONST0_RTX (GET_MODE (src)))
3514 {
3515 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3516 GEN_INT (size*8), addr));
3517
3518 switch ((int) size)
3519 {
3520 case 2:
3521 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3522 break;
3523 case 4:
3524 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3525 break;
3526 case 8:
3527 emit_insn (gen_insql_le (insl, src, addr));
3528 break;
3529 }
3530 }
3531
3532 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3533
3534 switch ((int) size)
3535 {
3536 case 2:
3537 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3538 break;
3539 case 4:
3540 {
3541 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3542 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3543 break;
3544 }
3545 case 8:
3546 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3547 break;
3548 }
3549 }
3550
3551 if (src != CONST0_RTX (GET_MODE (src)))
3552 {
3553 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3554 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3555 }
3556
3557 if (WORDS_BIG_ENDIAN)
3558 {
3559 emit_move_insn (meml, dstl);
3560 emit_move_insn (memh, dsth);
3561 }
3562 else
3563 {
3564 /* Must store high before low for the degenerate case of an aligned address. */
3565 emit_move_insn (memh, dsth);
3566 emit_move_insn (meml, dstl);
3567 }
3568 }
3569
3570 /* The block move code tries to maximize speed by separating loads and
3571 stores at the expense of register pressure: we load all of the data
3572 before we store it back out. There are two secondary effects worth
3573 mentioning: that this speeds copying to/from aligned and unaligned
3574 buffers, and that it makes the code significantly easier to write. */
3575
3576 #define MAX_MOVE_WORDS 8
3577
3578 /* Load an integral number of consecutive unaligned quadwords. */
3579
3580 static void
3581 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3582 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3583 {
3584 rtx const im8 = GEN_INT (-8);
3585 rtx const i64 = GEN_INT (64);
3586 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3587 rtx sreg, areg, tmp, smema;
3588 HOST_WIDE_INT i;
3589
3590 smema = XEXP (smem, 0);
3591 if (GET_CODE (smema) == LO_SUM)
3592 smema = force_reg (Pmode, smema);
3593
3594 /* Generate all the tmp registers we need. */
3595 for (i = 0; i < words; ++i)
3596 {
3597 data_regs[i] = out_regs[i];
3598 ext_tmps[i] = gen_reg_rtx (DImode);
3599 }
3600 data_regs[words] = gen_reg_rtx (DImode);
3601
3602 if (ofs != 0)
3603 smem = adjust_address (smem, GET_MODE (smem), ofs);
3604
3605 /* Load up all of the source data. */
3606 for (i = 0; i < words; ++i)
3607 {
3608 tmp = change_address (smem, DImode,
3609 gen_rtx_AND (DImode,
3610 plus_constant (smema, 8*i),
3611 im8));
3612 set_mem_alias_set (tmp, 0);
3613 emit_move_insn (data_regs[i], tmp);
3614 }
3615
3616 tmp = change_address (smem, DImode,
3617 gen_rtx_AND (DImode,
3618 plus_constant (smema, 8*words - 1),
3619 im8));
3620 set_mem_alias_set (tmp, 0);
3621 emit_move_insn (data_regs[words], tmp);
3622
3623 /* Extract the half-word fragments. Unfortunately DEC decided to make
3624 extxh with offset zero a noop instead of zeroing the register, so
3625 we must take care of that edge condition ourselves with cmov. */
3626
3627 sreg = copy_addr_to_reg (smema);
3628 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3629 1, OPTAB_WIDEN);
3630 if (WORDS_BIG_ENDIAN)
3631 emit_move_insn (sreg, plus_constant (sreg, 7));
3632 for (i = 0; i < words; ++i)
3633 {
3634 if (WORDS_BIG_ENDIAN)
3635 {
3636 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3637 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3638 }
3639 else
3640 {
3641 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3642 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3643 }
3644 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3645 gen_rtx_IF_THEN_ELSE (DImode,
3646 gen_rtx_EQ (DImode, areg,
3647 const0_rtx),
3648 const0_rtx, ext_tmps[i])));
3649 }
3650
3651 /* Merge the half-words into whole words. */
3652 for (i = 0; i < words; ++i)
3653 {
3654 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3655 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3656 }
3657 }
3658
3659 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3660 may be NULL to store zeros. */
3661
3662 static void
3663 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3664 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3665 {
3666 rtx const im8 = GEN_INT (-8);
3667 rtx const i64 = GEN_INT (64);
3668 rtx ins_tmps[MAX_MOVE_WORDS];
3669 rtx st_tmp_1, st_tmp_2, dreg;
3670 rtx st_addr_1, st_addr_2, dmema;
3671 HOST_WIDE_INT i;
3672
3673 dmema = XEXP (dmem, 0);
3674 if (GET_CODE (dmema) == LO_SUM)
3675 dmema = force_reg (Pmode, dmema);
3676
3677 /* Generate all the tmp registers we need. */
3678 if (data_regs != NULL)
3679 for (i = 0; i < words; ++i)
3680 ins_tmps[i] = gen_reg_rtx(DImode);
3681 st_tmp_1 = gen_reg_rtx(DImode);
3682 st_tmp_2 = gen_reg_rtx(DImode);
3683
3684 if (ofs != 0)
3685 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3686
3687 st_addr_2 = change_address (dmem, DImode,
3688 gen_rtx_AND (DImode,
3689 plus_constant (dmema, words*8 - 1),
3690 im8));
3691 set_mem_alias_set (st_addr_2, 0);
3692
3693 st_addr_1 = change_address (dmem, DImode,
3694 gen_rtx_AND (DImode, dmema, im8));
3695 set_mem_alias_set (st_addr_1, 0);
3696
3697 /* Load up the destination end bits. */
3698 emit_move_insn (st_tmp_2, st_addr_2);
3699 emit_move_insn (st_tmp_1, st_addr_1);
3700
3701 /* Shift the input data into place. */
3702 dreg = copy_addr_to_reg (dmema);
3703 if (WORDS_BIG_ENDIAN)
3704 emit_move_insn (dreg, plus_constant (dreg, 7));
3705 if (data_regs != NULL)
3706 {
3707 for (i = words-1; i >= 0; --i)
3708 {
3709 if (WORDS_BIG_ENDIAN)
3710 {
3711 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3712 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3713 }
3714 else
3715 {
3716 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3717 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3718 }
3719 }
3720 for (i = words-1; i > 0; --i)
3721 {
3722 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3723 ins_tmps[i-1], ins_tmps[i-1], 1,
3724 OPTAB_WIDEN);
3725 }
3726 }
3727
3728 /* Split and merge the ends with the destination data. */
3729 if (WORDS_BIG_ENDIAN)
3730 {
3731 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3732 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3733 }
3734 else
3735 {
3736 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3737 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3738 }
3739
3740 if (data_regs != NULL)
3741 {
3742 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3743 st_tmp_2, 1, OPTAB_WIDEN);
3744 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3745 st_tmp_1, 1, OPTAB_WIDEN);
3746 }
3747
3748 /* Store it all. */
3749 if (WORDS_BIG_ENDIAN)
3750 emit_move_insn (st_addr_1, st_tmp_1);
3751 else
3752 emit_move_insn (st_addr_2, st_tmp_2);
3753 for (i = words-1; i > 0; --i)
3754 {
3755 rtx tmp = change_address (dmem, DImode,
3756 gen_rtx_AND (DImode,
3757 plus_constant(dmema,
3758 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3759 im8));
3760 set_mem_alias_set (tmp, 0);
3761 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3762 }
3763 if (WORDS_BIG_ENDIAN)
3764 emit_move_insn (st_addr_2, st_tmp_2);
3765 else
3766 emit_move_insn (st_addr_1, st_tmp_1);
3767 }
3768
3769
3770 /* Expand string/block move operations.
3771
3772 operands[0] is the pointer to the destination.
3773 operands[1] is the pointer to the source.
3774 operands[2] is the number of bytes to move.
3775 operands[3] is the alignment. */
3776
3777 int
3778 alpha_expand_block_move (rtx operands[])
3779 {
3780 rtx bytes_rtx = operands[2];
3781 rtx align_rtx = operands[3];
3782 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3783 HOST_WIDE_INT bytes = orig_bytes;
3784 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3785 HOST_WIDE_INT dst_align = src_align;
3786 rtx orig_src = operands[1];
3787 rtx orig_dst = operands[0];
3788 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3789 rtx tmp;
3790 unsigned int i, words, ofs, nregs = 0;
3791
3792 if (orig_bytes <= 0)
3793 return 1;
3794 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3795 return 0;
3796
3797 /* Look for additional alignment information from recorded register info. */
3798
3799 tmp = XEXP (orig_src, 0);
3800 if (GET_CODE (tmp) == REG)
3801 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3802 else if (GET_CODE (tmp) == PLUS
3803 && GET_CODE (XEXP (tmp, 0)) == REG
3804 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3805 {
3806 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3807 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3808
3809 if (a > src_align)
3810 {
3811 if (a >= 64 && c % 8 == 0)
3812 src_align = 64;
3813 else if (a >= 32 && c % 4 == 0)
3814 src_align = 32;
3815 else if (a >= 16 && c % 2 == 0)
3816 src_align = 16;
3817 }
3818 }
3819
3820 tmp = XEXP (orig_dst, 0);
3821 if (GET_CODE (tmp) == REG)
3822 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3823 else if (GET_CODE (tmp) == PLUS
3824 && GET_CODE (XEXP (tmp, 0)) == REG
3825 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
3826 {
3827 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3828 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3829
3830 if (a > dst_align)
3831 {
3832 if (a >= 64 && c % 8 == 0)
3833 dst_align = 64;
3834 else if (a >= 32 && c % 4 == 0)
3835 dst_align = 32;
3836 else if (a >= 16 && c % 2 == 0)
3837 dst_align = 16;
3838 }
3839 }
3840
3841 ofs = 0;
3842 if (src_align >= 64 && bytes >= 8)
3843 {
3844 words = bytes / 8;
3845
3846 for (i = 0; i < words; ++i)
3847 data_regs[nregs + i] = gen_reg_rtx (DImode);
3848
3849 for (i = 0; i < words; ++i)
3850 emit_move_insn (data_regs[nregs + i],
3851 adjust_address (orig_src, DImode, ofs + i * 8));
3852
3853 nregs += words;
3854 bytes -= words * 8;
3855 ofs += words * 8;
3856 }
3857
3858 if (src_align >= 32 && bytes >= 4)
3859 {
3860 words = bytes / 4;
3861
3862 for (i = 0; i < words; ++i)
3863 data_regs[nregs + i] = gen_reg_rtx (SImode);
3864
3865 for (i = 0; i < words; ++i)
3866 emit_move_insn (data_regs[nregs + i],
3867 adjust_address (orig_src, SImode, ofs + i * 4));
3868
3869 nregs += words;
3870 bytes -= words * 4;
3871 ofs += words * 4;
3872 }
3873
3874 if (bytes >= 8)
3875 {
3876 words = bytes / 8;
3877
3878 for (i = 0; i < words+1; ++i)
3879 data_regs[nregs + i] = gen_reg_rtx (DImode);
3880
3881 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3882 words, ofs);
3883
3884 nregs += words;
3885 bytes -= words * 8;
3886 ofs += words * 8;
3887 }
3888
3889 if (! TARGET_BWX && bytes >= 4)
3890 {
3891 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3892 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3893 bytes -= 4;
3894 ofs += 4;
3895 }
3896
3897 if (bytes >= 2)
3898 {
3899 if (src_align >= 16)
3900 {
3901 do {
3902 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3903 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3904 bytes -= 2;
3905 ofs += 2;
3906 } while (bytes >= 2);
3907 }
3908 else if (! TARGET_BWX)
3909 {
3910 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3911 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3912 bytes -= 2;
3913 ofs += 2;
3914 }
3915 }
3916
3917 while (bytes > 0)
3918 {
3919 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3920 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3921 bytes -= 1;
3922 ofs += 1;
3923 }
3924
3925 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3926
3927 /* Now save it back out again. */
3928
3929 i = 0, ofs = 0;
3930
3931 /* Write out the data in whatever chunks reading the source allowed. */
3932 if (dst_align >= 64)
3933 {
3934 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3935 {
3936 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3937 data_regs[i]);
3938 ofs += 8;
3939 i++;
3940 }
3941 }
3942
3943 if (dst_align >= 32)
3944 {
3945 /* If the source has remaining DImode regs, write them out in
3946 two pieces. */
3947 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3948 {
3949 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3950 NULL_RTX, 1, OPTAB_WIDEN);
3951
3952 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3953 gen_lowpart (SImode, data_regs[i]));
3954 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3955 gen_lowpart (SImode, tmp));
3956 ofs += 8;
3957 i++;
3958 }
3959
3960 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3961 {
3962 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3963 data_regs[i]);
3964 ofs += 4;
3965 i++;
3966 }
3967 }
3968
3969 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3970 {
3971 /* Write out a remaining block of words using unaligned methods. */
3972
3973 for (words = 1; i + words < nregs; words++)
3974 if (GET_MODE (data_regs[i + words]) != DImode)
3975 break;
3976
3977 if (words == 1)
3978 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
3979 else
3980 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
3981 words, ofs);
3982
3983 i += words;
3984 ofs += words * 8;
3985 }
3986
3987 /* Due to the above, this won't be aligned. */
3988 /* ??? If we have more than one of these, consider constructing full
3989 words in registers and using alpha_expand_unaligned_store_words. */
3990 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3991 {
3992 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
3993 ofs += 4;
3994 i++;
3995 }
3996
3997 if (dst_align >= 16)
3998 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
3999 {
4000 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4001 i++;
4002 ofs += 2;
4003 }
4004 else
4005 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4006 {
4007 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4008 i++;
4009 ofs += 2;
4010 }
4011
4012 /* The remainder must be byte copies. */
4013 while (i < nregs)
4014 {
4015 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4016 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4017 i++;
4018 ofs += 1;
4019 }
4020
4021 return 1;
4022 }
4023
4024 int
4025 alpha_expand_block_clear (rtx operands[])
4026 {
4027 rtx bytes_rtx = operands[1];
4028 rtx align_rtx = operands[3];
4029 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4030 HOST_WIDE_INT bytes = orig_bytes;
4031 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4032 HOST_WIDE_INT alignofs = 0;
4033 rtx orig_dst = operands[0];
4034 rtx tmp;
4035 int i, words, ofs = 0;
4036
4037 if (orig_bytes <= 0)
4038 return 1;
4039 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4040 return 0;
4041
4042 /* Look for stricter alignment. */
4043 tmp = XEXP (orig_dst, 0);
4044 if (GET_CODE (tmp) == REG)
4045 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4046 else if (GET_CODE (tmp) == PLUS
4047 && GET_CODE (XEXP (tmp, 0)) == REG
4048 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
4049 {
4050 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4051 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4052
4053 if (a > align)
4054 {
4055 if (a >= 64)
4056 align = a, alignofs = 8 - c % 8;
4057 else if (a >= 32)
4058 align = a, alignofs = 4 - c % 4;
4059 else if (a >= 16)
4060 align = a, alignofs = 2 - c % 2;
4061 }
4062 }
4063
4064 /* Handle an unaligned prefix first. */
4065
4066 if (alignofs > 0)
4067 {
4068 #if HOST_BITS_PER_WIDE_INT >= 64
4069 /* Given that alignofs is bounded by align, the only time BWX could
4070 generate three stores is for a 7 byte fill. Prefer two individual
4071 stores over a load/mask/store sequence. */
4072 if ((!TARGET_BWX || alignofs == 7)
4073 && align >= 32
4074 && !(alignofs == 4 && bytes >= 4))
4075 {
4076 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4077 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4078 rtx mem, tmp;
4079 HOST_WIDE_INT mask;
4080
4081 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4082 set_mem_alias_set (mem, 0);
4083
4084 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
4085 if (bytes < alignofs)
4086 {
4087 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4088 ofs += bytes;
4089 bytes = 0;
4090 }
4091 else
4092 {
4093 bytes -= alignofs;
4094 ofs += alignofs;
4095 }
4096 alignofs = 0;
4097
4098 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4099 NULL_RTX, 1, OPTAB_WIDEN);
4100
4101 emit_move_insn (mem, tmp);
4102 }
4103 #endif
4104
4105 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4106 {
4107 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4108 bytes -= 1;
4109 ofs += 1;
4110 alignofs -= 1;
4111 }
4112 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4113 {
4114 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4115 bytes -= 2;
4116 ofs += 2;
4117 alignofs -= 2;
4118 }
4119 if (alignofs == 4 && bytes >= 4)
4120 {
4121 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4122 bytes -= 4;
4123 ofs += 4;
4124 alignofs = 0;
4125 }
4126
4127 /* If we've not used the extra lead alignment information by now,
4128 we won't be able to. Downgrade align to match what's left over. */
4129 if (alignofs > 0)
4130 {
4131 alignofs = alignofs & -alignofs;
4132 align = MIN (align, alignofs * BITS_PER_UNIT);
4133 }
4134 }
4135
4136 /* Handle a block of contiguous long-words. */
4137
4138 if (align >= 64 && bytes >= 8)
4139 {
4140 words = bytes / 8;
4141
4142 for (i = 0; i < words; ++i)
4143 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4144 const0_rtx);
4145
4146 bytes -= words * 8;
4147 ofs += words * 8;
4148 }
4149
4150 /* If the block is large and appropriately aligned, emit a single
4151 store followed by a sequence of stq_u insns. */
4152
4153 if (align >= 32 && bytes > 16)
4154 {
4155 rtx orig_dsta;
4156
4157 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4158 bytes -= 4;
4159 ofs += 4;
4160
4161 orig_dsta = XEXP (orig_dst, 0);
4162 if (GET_CODE (orig_dsta) == LO_SUM)
4163 orig_dsta = force_reg (Pmode, orig_dsta);
4164
4165 words = bytes / 8;
4166 for (i = 0; i < words; ++i)
4167 {
4168 rtx mem
4169 = change_address (orig_dst, DImode,
4170 gen_rtx_AND (DImode,
4171 plus_constant (orig_dsta, ofs + i*8),
4172 GEN_INT (-8)));
4173 set_mem_alias_set (mem, 0);
4174 emit_move_insn (mem, const0_rtx);
4175 }
4176
4177 /* Depending on the alignment, the first stq_u may have overlapped
4178 with the initial stl, which means that the last stq_u didn't
4179 write as much as it would appear. Leave those questionable bytes
4180 unaccounted for. */
4181 bytes -= words * 8 - 4;
4182 ofs += words * 8 - 4;
4183 }
4184
4185 /* Handle a smaller block of aligned words. */
4186
4187 if ((align >= 64 && bytes == 4)
4188 || (align == 32 && bytes >= 4))
4189 {
4190 words = bytes / 4;
4191
4192 for (i = 0; i < words; ++i)
4193 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4194 const0_rtx);
4195
4196 bytes -= words * 4;
4197 ofs += words * 4;
4198 }
4199
4200 /* An unaligned block uses stq_u stores for as many words as possible. */
4201
4202 if (bytes >= 8)
4203 {
4204 words = bytes / 8;
4205
4206 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4207
4208 bytes -= words * 8;
4209 ofs += words * 8;
4210 }
4211
4212 /* Next clean up any trailing pieces. */
4213
4214 #if HOST_BITS_PER_WIDE_INT >= 64
4215 /* Count the number of bits in BYTES for which aligned stores could
4216 be emitted. */
4217 words = 0;
4218 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4219 if (bytes & i)
4220 words += 1;
4221
4222 /* If we have appropriate alignment (and it wouldn't take too many
4223 instructions otherwise), mask out the bytes we need. */
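/* For example, with BWX, align >= 64 and 7 trailing bytes, clearing them
   directly would take three stores (stl + stw + stb), so the quadword is
   instead loaded, ANDed with a mask that keeps only its top byte, and
   stored back.  */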
4224 if (TARGET_BWX ? words > 2 : bytes > 0)
4225 {
4226 if (align >= 64)
4227 {
4228 rtx mem, tmp;
4229 HOST_WIDE_INT mask;
4230
4231 mem = adjust_address (orig_dst, DImode, ofs);
4232 set_mem_alias_set (mem, 0);
4233
4234 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4235
4236 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4237 NULL_RTX, 1, OPTAB_WIDEN);
4238
4239 emit_move_insn (mem, tmp);
4240 return 1;
4241 }
4242 else if (align >= 32 && bytes < 4)
4243 {
4244 rtx mem, tmp;
4245 HOST_WIDE_INT mask;
4246
4247 mem = adjust_address (orig_dst, SImode, ofs);
4248 set_mem_alias_set (mem, 0);
4249
4250 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4251
4252 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4253 NULL_RTX, 1, OPTAB_WIDEN);
4254
4255 emit_move_insn (mem, tmp);
4256 return 1;
4257 }
4258 }
4259 #endif
4260
4261 if (!TARGET_BWX && bytes >= 4)
4262 {
4263 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4264 bytes -= 4;
4265 ofs += 4;
4266 }
4267
4268 if (bytes >= 2)
4269 {
4270 if (align >= 16)
4271 {
4272 do {
4273 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4274 const0_rtx);
4275 bytes -= 2;
4276 ofs += 2;
4277 } while (bytes >= 2);
4278 }
4279 else if (! TARGET_BWX)
4280 {
4281 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4282 bytes -= 2;
4283 ofs += 2;
4284 }
4285 }
4286
4287 while (bytes > 0)
4288 {
4289 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4290 bytes -= 1;
4291 ofs += 1;
4292 }
4293
4294 return 1;
4295 }
4296
4297 /* Returns a mask so that zap(x, value) == x & mask. */
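/* Each bit I of VALUE selects byte I of the result: the mask byte is 0xff
   when the bit is clear and zero when it is set.  For example,
   alpha_expand_zap_mask (0x01) yields 0xffffffffffffff00, so zapping
   byte 0 of X is the same as X & 0xffffffffffffff00.  */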
4298
4299 rtx
4300 alpha_expand_zap_mask (HOST_WIDE_INT value)
4301 {
4302 rtx result;
4303 int i;
4304
4305 if (HOST_BITS_PER_WIDE_INT >= 64)
4306 {
4307 HOST_WIDE_INT mask = 0;
4308
4309 for (i = 7; i >= 0; --i)
4310 {
4311 mask <<= 8;
4312 if (!((value >> i) & 1))
4313 mask |= 0xff;
4314 }
4315
4316 result = gen_int_mode (mask, DImode);
4317 }
4318 else
4319 {
4320 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4321
4322 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4323
4324 for (i = 7; i >= 4; --i)
4325 {
4326 mask_hi <<= 8;
4327 if (!((value >> i) & 1))
4328 mask_hi |= 0xff;
4329 }
4330
4331 for (i = 3; i >= 0; --i)
4332 {
4333 mask_lo <<= 8;
4334 if (!((value >> i) & 1))
4335 mask_lo |= 0xff;
4336 }
4337
4338 result = immed_double_const (mask_lo, mask_hi, DImode);
4339 }
4340
4341 return result;
4342 }
4343
4344 void
4345 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4346 enum machine_mode mode,
4347 rtx op0, rtx op1, rtx op2)
4348 {
4349 op0 = gen_lowpart (mode, op0);
4350
4351 if (op1 == const0_rtx)
4352 op1 = CONST0_RTX (mode);
4353 else
4354 op1 = gen_lowpart (mode, op1);
4355
4356 if (op2 == const0_rtx)
4357 op2 = CONST0_RTX (mode);
4358 else
4359 op2 = gen_lowpart (mode, op2);
4360
4361 emit_insn ((*gen) (op0, op1, op2));
4362 }
4363
4364 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4365 COND is true. Mark the jump as unlikely to be taken. */
4366
4367 static void
4368 emit_unlikely_jump (rtx cond, rtx label)
4369 {
4370 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4371 rtx x;
4372
4373 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4374 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4375 REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
4376 }
4377
4378 /* A subroutine of the atomic operation splitters. Emit a load-locked
4379 instruction in MODE. */
4380
4381 static void
4382 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4383 {
4384 rtx (*fn) (rtx, rtx) = NULL;
4385 if (mode == SImode)
4386 fn = gen_load_locked_si;
4387 else if (mode == DImode)
4388 fn = gen_load_locked_di;
4389 emit_insn (fn (reg, mem));
4390 }
4391
4392 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4393 instruction in MODE. */
4394
4395 static void
4396 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4397 {
4398 rtx (*fn) (rtx, rtx, rtx) = NULL;
4399 if (mode == SImode)
4400 fn = gen_store_conditional_si;
4401 else if (mode == DImode)
4402 fn = gen_store_conditional_di;
4403 emit_insn (fn (res, mem, val));
4404 }
4405
4406 /* A subroutine of the atomic operation splitters. Emit an insxl
4407 instruction in MODE. */
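/* For example, with MODE == QImode this emits insbl on little-endian
   targets, which shifts the low byte of OP1 into the byte lane selected
   by the low three bits of the byte address OP2, leaving zeros elsewhere;
   inswl does the same for a 16-bit value.  The callers then OR the result
   into a quadword whose target lane has been cleared with mskxl.  */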
4408
4409 static rtx
4410 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4411 {
4412 rtx ret = gen_reg_rtx (DImode);
4413 rtx (*fn) (rtx, rtx, rtx);
4414
4415 if (WORDS_BIG_ENDIAN)
4416 {
4417 if (mode == QImode)
4418 fn = gen_insbl_be;
4419 else
4420 fn = gen_inswl_be;
4421 }
4422 else
4423 {
4424 if (mode == QImode)
4425 fn = gen_insbl_le;
4426 else
4427 fn = gen_inswl_le;
4428 }
4429 /* The insbl and inswl patterns require a register operand. */
4430 op1 = force_reg (mode, op1);
4431 emit_insn (fn (ret, op1, op2));
4432
4433 return ret;
4434 }
4435
4436 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4437 to perform. MEM is the memory on which to operate. VAL is the second
4438 operand of the binary operator. BEFORE and AFTER are optional locations to
4439 return the value of MEM either before or after the operation. SCRATCH is
4440 a scratch register. */
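/* Roughly, the emitted sequence is

	mb
   .L1:	ld<x>_l	 before, mem
	<op>	 before, val, scratch
	st<x>_c	 scratch, mem
	beq	 scratch, .L1
	mb

   where the store-conditional writes its success flag back into the
   scratch register and the loop retries until it succeeds.  */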
4441
4442 void
4443 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4444 rtx before, rtx after, rtx scratch)
4445 {
4446 enum machine_mode mode = GET_MODE (mem);
4447 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4448
4449 emit_insn (gen_memory_barrier ());
4450
4451 label = gen_label_rtx ();
4452 emit_label (label);
4453 label = gen_rtx_LABEL_REF (DImode, label);
4454
4455 if (before == NULL)
4456 before = scratch;
4457 emit_load_locked (mode, before, mem);
4458
4459 if (code == NOT)
4460 x = gen_rtx_AND (mode, gen_rtx_NOT (mode, before), val);
4461 else
4462 x = gen_rtx_fmt_ee (code, mode, before, val);
4463 if (after)
4464 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4465 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4466
4467 emit_store_conditional (mode, cond, mem, scratch);
4468
4469 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4470 emit_unlikely_jump (x, label);
4471
4472 emit_insn (gen_memory_barrier ());
4473 }
4474
4475 /* Expand a compare and swap operation. */
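/* The split form is roughly

	mb
   .L1:	ld<x>_l	 retval, mem
	cmpeq	 retval, oldval, cond
	beq	 cond, .L2
	mov	 newval, scratch
	st<x>_c	 scratch, mem
	beq	 scratch, .L1
	mb
   .L2:

   so the store is skipped entirely when the comparison fails, and the
   locked sequence is retried when the store-conditional fails.  (When
   OLDVAL is zero the cmpeq/beq pair collapses to a single bne.)  */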
4476
4477 void
4478 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4479 rtx scratch)
4480 {
4481 enum machine_mode mode = GET_MODE (mem);
4482 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4483
4484 emit_insn (gen_memory_barrier ());
4485
4486 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4487 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4488 emit_label (XEXP (label1, 0));
4489
4490 emit_load_locked (mode, retval, mem);
4491
4492 x = gen_lowpart (DImode, retval);
4493 if (oldval == const0_rtx)
4494 x = gen_rtx_NE (DImode, x, const0_rtx);
4495 else
4496 {
4497 x = gen_rtx_EQ (DImode, x, oldval);
4498 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4499 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4500 }
4501 emit_unlikely_jump (x, label2);
4502
4503 emit_move_insn (scratch, newval);
4504 emit_store_conditional (mode, cond, mem, scratch);
4505
4506 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4507 emit_unlikely_jump (x, label1);
4508
4509 emit_insn (gen_memory_barrier ());
4510 emit_label (XEXP (label2, 0));
4511 }
4512
4513 void
4514 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4515 {
4516 enum machine_mode mode = GET_MODE (mem);
4517 rtx addr, align, wdst;
4518 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4519
4520 addr = force_reg (DImode, XEXP (mem, 0));
4521 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4522 NULL_RTX, 1, OPTAB_DIRECT);
4523
4524 oldval = convert_modes (DImode, mode, oldval, 1);
4525 newval = emit_insxl (mode, newval, addr);
4526
4527 wdst = gen_reg_rtx (DImode);
4528 if (mode == QImode)
4529 fn5 = gen_sync_compare_and_swapqi_1;
4530 else
4531 fn5 = gen_sync_compare_and_swaphi_1;
4532 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4533
4534 emit_move_insn (dst, gen_lowpart (mode, wdst));
4535 }
4536
4537 void
4538 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4539 rtx oldval, rtx newval, rtx align,
4540 rtx scratch, rtx cond)
4541 {
4542 rtx label1, label2, mem, width, mask, x;
4543
4544 mem = gen_rtx_MEM (DImode, align);
4545 MEM_VOLATILE_P (mem) = 1;
4546
4547 emit_insn (gen_memory_barrier ());
4548 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4549 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4550 emit_label (XEXP (label1, 0));
4551
4552 emit_load_locked (DImode, scratch, mem);
4553
4554 width = GEN_INT (GET_MODE_BITSIZE (mode));
4555 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4556 if (WORDS_BIG_ENDIAN)
4557 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4558 else
4559 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4560
4561 if (oldval == const0_rtx)
4562 x = gen_rtx_NE (DImode, dest, const0_rtx);
4563 else
4564 {
4565 x = gen_rtx_EQ (DImode, dest, oldval);
4566 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4567 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4568 }
4569 emit_unlikely_jump (x, label2);
4570
4571 if (WORDS_BIG_ENDIAN)
4572 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4573 else
4574 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4575 emit_insn (gen_iordi3 (scratch, scratch, newval));
4576
4577 emit_store_conditional (DImode, scratch, mem, scratch);
4578
4579 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4580 emit_unlikely_jump (x, label1);
4581
4582 emit_insn (gen_memory_barrier ());
4583 emit_label (XEXP (label2, 0));
4584 }
4585
4586 /* Expand an atomic exchange operation. */
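/* Roughly

	mb
   .L1:	ld<x>_l	 retval, mem
	mov	 val, scratch
	st<x>_c	 scratch, mem
	beq	 scratch, .L1

   Unlike the other splitters, no trailing memory barrier is emitted;
   __sync_lock_test_and_set is only documented to be an acquire barrier.  */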
4587
4588 void
4589 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4590 {
4591 enum machine_mode mode = GET_MODE (mem);
4592 rtx label, x, cond = gen_lowpart (DImode, scratch);
4593
4594 emit_insn (gen_memory_barrier ());
4595
4596 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4597 emit_label (XEXP (label, 0));
4598
4599 emit_load_locked (mode, retval, mem);
4600 emit_move_insn (scratch, val);
4601 emit_store_conditional (mode, cond, mem, scratch);
4602
4603 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4604 emit_unlikely_jump (x, label);
4605 }
4606
4607 void
4608 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4609 {
4610 enum machine_mode mode = GET_MODE (mem);
4611 rtx addr, align, wdst;
4612 rtx (*fn4) (rtx, rtx, rtx, rtx);
4613
4614 /* Force the address into a register. */
4615 addr = force_reg (DImode, XEXP (mem, 0));
4616
4617 /* Align it to a multiple of 8. */
4618 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4619 NULL_RTX, 1, OPTAB_DIRECT);
4620
4621 /* Insert val into the correct byte location within the word. */
4622 val = emit_insxl (mode, val, addr);
4623
4624 wdst = gen_reg_rtx (DImode);
4625 if (mode == QImode)
4626 fn4 = gen_sync_lock_test_and_setqi_1;
4627 else
4628 fn4 = gen_sync_lock_test_and_sethi_1;
4629 emit_insn (fn4 (wdst, addr, val, align));
4630
4631 emit_move_insn (dst, gen_lowpart (mode, wdst));
4632 }
4633
4634 void
4635 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4636 rtx val, rtx align, rtx scratch)
4637 {
4638 rtx label, mem, width, mask, x;
4639
4640 mem = gen_rtx_MEM (DImode, align);
4641 MEM_VOLATILE_P (mem) = 1;
4642
4643 emit_insn (gen_memory_barrier ());
4644 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4645 emit_label (XEXP (label, 0));
4646
4647 emit_load_locked (DImode, scratch, mem);
4648
4649 width = GEN_INT (GET_MODE_BITSIZE (mode));
4650 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4651 if (WORDS_BIG_ENDIAN)
4652 {
4653 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4654 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4655 }
4656 else
4657 {
4658 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4659 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4660 }
4661 emit_insn (gen_iordi3 (scratch, scratch, val));
4662
4663 emit_store_conditional (DImode, scratch, mem, scratch);
4664
4665 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4666 emit_unlikely_jump (x, label);
4667 }
4668 \f
4669 /* Adjust the cost of a scheduling dependency. Return the new cost of
4670 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4671
4672 static int
4673 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4674 {
4675 enum attr_type insn_type, dep_insn_type;
4676
4677 /* If the dependence is an anti-dependence, there is no cost. For an
4678 output dependence, there is sometimes a cost, but it doesn't seem
4679 worth handling those few cases. */
4680 if (REG_NOTE_KIND (link) != 0)
4681 return cost;
4682
4683 /* If we can't recognize the insns, we can't really do anything. */
4684 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4685 return cost;
4686
4687 insn_type = get_attr_type (insn);
4688 dep_insn_type = get_attr_type (dep_insn);
4689
4690 /* Bring in the user-defined memory latency. */
4691 if (dep_insn_type == TYPE_ILD
4692 || dep_insn_type == TYPE_FLD
4693 || dep_insn_type == TYPE_LDSYM)
4694 cost += alpha_memory_latency-1;
4695
4696 /* Everything else handled in DFA bypasses now. */
4697
4698 return cost;
4699 }
4700
4701 /* The number of instructions that can be issued per cycle. */
4702
4703 static int
4704 alpha_issue_rate (void)
4705 {
4706 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4707 }
4708
4709 /* How many alternative schedules to try. This should be as wide as the
4710 scheduling freedom in the DFA, but no wider. Making this value too
4711 large results in extra work for the scheduler.
4712
4713 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4714 alternative schedules. For EV5, we can choose between E0/E1 and
4715 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4716
4717 static int
4718 alpha_multipass_dfa_lookahead (void)
4719 {
4720 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4721 }
4722 \f
4723 /* Machine-specific function data. */
4724
4725 struct machine_function GTY(())
4726 {
4727 /* For unicosmk. */
4728 /* List of call information words for calls from this function. */
4729 struct rtx_def *first_ciw;
4730 struct rtx_def *last_ciw;
4731 int ciw_count;
4732
4733 /* List of deferred case vectors. */
4734 struct rtx_def *addr_list;
4735
4736 /* For OSF. */
4737 const char *some_ld_name;
4738
4739 /* For TARGET_LD_BUGGY_LDGP. */
4740 struct rtx_def *gp_save_rtx;
4741 };
4742
4743 /* How to allocate a 'struct machine_function'. */
4744
4745 static struct machine_function *
4746 alpha_init_machine_status (void)
4747 {
4748 return ((struct machine_function *)
4749 ggc_alloc_cleared (sizeof (struct machine_function)));
4750 }
4751
4752 /* Functions to save and restore alpha_return_addr_rtx. */
4753
4754 /* Start the ball rolling with RETURN_ADDR_RTX. */
4755
4756 rtx
4757 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4758 {
4759 if (count != 0)
4760 return const0_rtx;
4761
4762 return get_hard_reg_initial_val (Pmode, REG_RA);
4763 }
4764
4765 /* Return or create a memory slot containing the gp value for the current
4766 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4767
4768 rtx
4769 alpha_gp_save_rtx (void)
4770 {
4771 rtx seq, m = cfun->machine->gp_save_rtx;
4772
4773 if (m == NULL)
4774 {
4775 start_sequence ();
4776
4777 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4778 m = validize_mem (m);
4779 emit_move_insn (m, pic_offset_table_rtx);
4780
4781 seq = get_insns ();
4782 end_sequence ();
4783
4784 /* We used to simply emit the sequence after entry_of_function.
4785 However this breaks the CFG if the first instruction in the
4786 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4787 label. Emit the sequence properly on the edge. We are only
4788 invoked from dw2_build_landing_pads and finish_eh_generation
4789 will call commit_edge_insertions thanks to a kludge. */
4790 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4791
4792 cfun->machine->gp_save_rtx = m;
4793 }
4794
4795 return m;
4796 }
4797
4798 static int
4799 alpha_ra_ever_killed (void)
4800 {
4801 rtx top;
4802
4803 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4804 return (int)df_regs_ever_live_p (REG_RA);
4805
4806 push_topmost_sequence ();
4807 top = get_insns ();
4808 pop_topmost_sequence ();
4809
4810 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4811 }
4812
4813 \f
4814 /* Return the trap mode suffix applicable to the current
4815 instruction, or NULL. */
4816
4817 static const char *
4818 get_trap_mode_suffix (void)
4819 {
4820 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4821
4822 switch (s)
4823 {
4824 case TRAP_SUFFIX_NONE:
4825 return NULL;
4826
4827 case TRAP_SUFFIX_SU:
4828 if (alpha_fptm >= ALPHA_FPTM_SU)
4829 return "su";
4830 return NULL;
4831
4832 case TRAP_SUFFIX_SUI:
4833 if (alpha_fptm >= ALPHA_FPTM_SUI)
4834 return "sui";
4835 return NULL;
4836
4837 case TRAP_SUFFIX_V_SV:
4838 switch (alpha_fptm)
4839 {
4840 case ALPHA_FPTM_N:
4841 return NULL;
4842 case ALPHA_FPTM_U:
4843 return "v";
4844 case ALPHA_FPTM_SU:
4845 case ALPHA_FPTM_SUI:
4846 return "sv";
4847 default:
4848 gcc_unreachable ();
4849 }
4850
4851 case TRAP_SUFFIX_V_SV_SVI:
4852 switch (alpha_fptm)
4853 {
4854 case ALPHA_FPTM_N:
4855 return NULL;
4856 case ALPHA_FPTM_U:
4857 return "v";
4858 case ALPHA_FPTM_SU:
4859 return "sv";
4860 case ALPHA_FPTM_SUI:
4861 return "svi";
4862 default:
4863 gcc_unreachable ();
4864 }
4865 break;
4866
4867 case TRAP_SUFFIX_U_SU_SUI:
4868 switch (alpha_fptm)
4869 {
4870 case ALPHA_FPTM_N:
4871 return NULL;
4872 case ALPHA_FPTM_U:
4873 return "u";
4874 case ALPHA_FPTM_SU:
4875 return "su";
4876 case ALPHA_FPTM_SUI:
4877 return "sui";
4878 default:
4879 gcc_unreachable ();
4880 }
4881 break;
4882
4883 default:
4884 gcc_unreachable ();
4885 }
4886 gcc_unreachable ();
4887 }
4888
4889 /* Return the rounding mode suffix applicable to the current
4890 instruction, or NULL. */
4891
4892 static const char *
4893 get_round_mode_suffix (void)
4894 {
4895 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4896
4897 switch (s)
4898 {
4899 case ROUND_SUFFIX_NONE:
4900 return NULL;
4901 case ROUND_SUFFIX_NORMAL:
4902 switch (alpha_fprm)
4903 {
4904 case ALPHA_FPRM_NORM:
4905 return NULL;
4906 case ALPHA_FPRM_MINF:
4907 return "m";
4908 case ALPHA_FPRM_CHOP:
4909 return "c";
4910 case ALPHA_FPRM_DYN:
4911 return "d";
4912 default:
4913 gcc_unreachable ();
4914 }
4915 break;
4916
4917 case ROUND_SUFFIX_C:
4918 return "c";
4919
4920 default:
4921 gcc_unreachable ();
4922 }
4923 gcc_unreachable ();
4924 }
4925
4926 /* Locate some local-dynamic symbol still in use by this function
4927 so that we can print its name in some movdi_er_tlsldm pattern. */
4928
4929 static int
4930 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4931 {
4932 rtx x = *px;
4933
4934 if (GET_CODE (x) == SYMBOL_REF
4935 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4936 {
4937 cfun->machine->some_ld_name = XSTR (x, 0);
4938 return 1;
4939 }
4940
4941 return 0;
4942 }
4943
4944 static const char *
4945 get_some_local_dynamic_name (void)
4946 {
4947 rtx insn;
4948
4949 if (cfun->machine->some_ld_name)
4950 return cfun->machine->some_ld_name;
4951
4952 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4953 if (INSN_P (insn)
4954 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4955 return cfun->machine->some_ld_name;
4956
4957 gcc_unreachable ();
4958 }
4959
4960 /* Print an operand. Recognize special options, documented below. */
4961
4962 void
4963 print_operand (FILE *file, rtx x, int code)
4964 {
4965 int i;
4966
4967 switch (code)
4968 {
4969 case '~':
4970 /* Print the assembler name of the current function. */
4971 assemble_name (file, alpha_fnname);
4972 break;
4973
4974 case '&':
4975 assemble_name (file, get_some_local_dynamic_name ());
4976 break;
4977
4978 case '/':
4979 {
4980 const char *trap = get_trap_mode_suffix ();
4981 const char *round = get_round_mode_suffix ();
4982
4983 if (trap || round)
4984 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
4985 (trap ? trap : ""), (round ? round : ""));
4986 break;
4987 }
4988
4989 case ',':
4990 /* Generates single precision instruction suffix. */
4991 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
4992 break;
4993
4994 case '-':
4995 /* Generates double precision instruction suffix. */
4996 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
4997 break;
4998
4999 case '#':
5000 if (alpha_this_literal_sequence_number == 0)
5001 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5002 fprintf (file, "%d", alpha_this_literal_sequence_number);
5003 break;
5004
5005 case '*':
5006 if (alpha_this_gpdisp_sequence_number == 0)
5007 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5008 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5009 break;
5010
5011 case 'H':
5012 if (GET_CODE (x) == HIGH)
5013 output_addr_const (file, XEXP (x, 0));
5014 else
5015 output_operand_lossage ("invalid %%H value");
5016 break;
5017
5018 case 'J':
5019 {
5020 const char *lituse;
5021
5022 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5023 {
5024 x = XVECEXP (x, 0, 0);
5025 lituse = "lituse_tlsgd";
5026 }
5027 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5028 {
5029 x = XVECEXP (x, 0, 0);
5030 lituse = "lituse_tlsldm";
5031 }
5032 else if (GET_CODE (x) == CONST_INT)
5033 lituse = "lituse_jsr";
5034 else
5035 {
5036 output_operand_lossage ("invalid %%J value");
5037 break;
5038 }
5039
5040 if (x != const0_rtx)
5041 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5042 }
5043 break;
5044
5045 case 'j':
5046 {
5047 const char *lituse;
5048
5049 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5050 lituse = "lituse_jsrdirect";
5051 #else
5052 lituse = "lituse_jsr";
5053 #endif
5054
5055 gcc_assert (INTVAL (x) != 0);
5056 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5057 }
5058 break;
5059 case 'r':
5060 /* If this operand is the constant zero, write it as "$31". */
5061 if (GET_CODE (x) == REG)
5062 fprintf (file, "%s", reg_names[REGNO (x)]);
5063 else if (x == CONST0_RTX (GET_MODE (x)))
5064 fprintf (file, "$31");
5065 else
5066 output_operand_lossage ("invalid %%r value");
5067 break;
5068
5069 case 'R':
5070 /* Similar, but for floating-point. */
5071 if (GET_CODE (x) == REG)
5072 fprintf (file, "%s", reg_names[REGNO (x)]);
5073 else if (x == CONST0_RTX (GET_MODE (x)))
5074 fprintf (file, "$f31");
5075 else
5076 output_operand_lossage ("invalid %%R value");
5077 break;
5078
5079 case 'N':
5080 /* Write the 1's complement of a constant. */
5081 if (GET_CODE (x) != CONST_INT)
5082 output_operand_lossage ("invalid %%N value");
5083
5084 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5085 break;
5086
5087 case 'P':
5088 /* Write 1 << C, for a constant C. */
5089 if (GET_CODE (x) != CONST_INT)
5090 output_operand_lossage ("invalid %%P value");
5091
5092 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5093 break;
5094
5095 case 'h':
5096 /* Write the high-order 16 bits of a constant, sign-extended. */
5097 if (GET_CODE (x) != CONST_INT)
5098 output_operand_lossage ("invalid %%h value");
5099
5100 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5101 break;
5102
5103 case 'L':
5104 /* Write the low-order 16 bits of a constant, sign-extended. */
5105 if (GET_CODE (x) != CONST_INT)
5106 output_operand_lossage ("invalid %%L value");
5107
5108 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5109 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5110 break;
5111
5112 case 'm':
5113 /* Write mask for ZAP insn. */
5114 if (GET_CODE (x) == CONST_DOUBLE)
5115 {
5116 HOST_WIDE_INT mask = 0;
5117 HOST_WIDE_INT value;
5118
5119 value = CONST_DOUBLE_LOW (x);
5120 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5121 i++, value >>= 8)
5122 if (value & 0xff)
5123 mask |= (1 << i);
5124
5125 value = CONST_DOUBLE_HIGH (x);
5126 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5127 i++, value >>= 8)
5128 if (value & 0xff)
5129 mask |= (1 << (i + sizeof (int)));
5130
5131 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5132 }
5133
5134 else if (GET_CODE (x) == CONST_INT)
5135 {
5136 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5137
5138 for (i = 0; i < 8; i++, value >>= 8)
5139 if (value & 0xff)
5140 mask |= (1 << i);
5141
5142 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5143 }
5144 else
5145 output_operand_lossage ("invalid %%m value");
5146 break;
5147
5148 case 'M':
5149 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5150 if (GET_CODE (x) != CONST_INT
5151 || (INTVAL (x) != 8 && INTVAL (x) != 16
5152 && INTVAL (x) != 32 && INTVAL (x) != 64))
5153 output_operand_lossage ("invalid %%M value");
5154
5155 fprintf (file, "%s",
5156 (INTVAL (x) == 8 ? "b"
5157 : INTVAL (x) == 16 ? "w"
5158 : INTVAL (x) == 32 ? "l"
5159 : "q"));
5160 break;
5161
5162 case 'U':
5163 /* Similar, except do it from the mask. */
5164 if (GET_CODE (x) == CONST_INT)
5165 {
5166 HOST_WIDE_INT value = INTVAL (x);
5167
5168 if (value == 0xff)
5169 {
5170 fputc ('b', file);
5171 break;
5172 }
5173 if (value == 0xffff)
5174 {
5175 fputc ('w', file);
5176 break;
5177 }
5178 if (value == 0xffffffff)
5179 {
5180 fputc ('l', file);
5181 break;
5182 }
5183 if (value == -1)
5184 {
5185 fputc ('q', file);
5186 break;
5187 }
5188 }
5189 else if (HOST_BITS_PER_WIDE_INT == 32
5190 && GET_CODE (x) == CONST_DOUBLE
5191 && CONST_DOUBLE_LOW (x) == 0xffffffff
5192 && CONST_DOUBLE_HIGH (x) == 0)
5193 {
5194 fputc ('l', file);
5195 break;
5196 }
5197 output_operand_lossage ("invalid %%U value");
5198 break;
5199
5200 case 's':
5201 /* Write the constant value divided by 8 for little-endian mode or
5202 (56 - value) / 8 for big-endian mode. */
5203
5204 if (GET_CODE (x) != CONST_INT
5205 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5206 ? 56
5207 : 64)
5208 || (INTVAL (x) & 7) != 0)
5209 output_operand_lossage ("invalid %%s value");
5210
5211 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5212 WORDS_BIG_ENDIAN
5213 ? (56 - INTVAL (x)) / 8
5214 : INTVAL (x) / 8);
5215 break;
5216
5217 case 'S':
5218 /* Same, except compute (64 - c) / 8 */
5219
5220 if (GET_CODE (x) != CONST_INT
5221 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5222 || (INTVAL (x) & 7) != 0)
5223 output_operand_lossage ("invalid %%S value");
5224
5225 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5226 break;
5227
5228 case 't':
5229 {
5230 /* On Unicos/Mk systems: use a DEX expression if the symbol
5231 clashes with a register name. */
5232 int dex = unicosmk_need_dex (x);
5233 if (dex)
5234 fprintf (file, "DEX(%d)", dex);
5235 else
5236 output_addr_const (file, x);
5237 }
5238 break;
5239
5240 case 'C': case 'D': case 'c': case 'd':
5241 /* Write out comparison name. */
5242 {
5243 enum rtx_code c = GET_CODE (x);
5244
5245 if (!COMPARISON_P (x))
5246 output_operand_lossage ("invalid %%C value");
5247
5248 else if (code == 'D')
5249 c = reverse_condition (c);
5250 else if (code == 'c')
5251 c = swap_condition (c);
5252 else if (code == 'd')
5253 c = swap_condition (reverse_condition (c));
5254
5255 if (c == LEU)
5256 fprintf (file, "ule");
5257 else if (c == LTU)
5258 fprintf (file, "ult");
5259 else if (c == UNORDERED)
5260 fprintf (file, "un");
5261 else
5262 fprintf (file, "%s", GET_RTX_NAME (c));
5263 }
5264 break;
5265
5266 case 'E':
5267 /* Write the divide or modulus operator. */
5268 switch (GET_CODE (x))
5269 {
5270 case DIV:
5271 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5272 break;
5273 case UDIV:
5274 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5275 break;
5276 case MOD:
5277 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5278 break;
5279 case UMOD:
5280 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5281 break;
5282 default:
5283 output_operand_lossage ("invalid %%E value");
5284 break;
5285 }
5286 break;
5287
5288 case 'A':
5289 /* Write "_u" for unaligned access. */
5290 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
5291 fprintf (file, "_u");
5292 break;
5293
5294 case 0:
5295 if (GET_CODE (x) == REG)
5296 fprintf (file, "%s", reg_names[REGNO (x)]);
5297 else if (GET_CODE (x) == MEM)
5298 output_address (XEXP (x, 0));
5299 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5300 {
5301 switch (XINT (XEXP (x, 0), 1))
5302 {
5303 case UNSPEC_DTPREL:
5304 case UNSPEC_TPREL:
5305 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5306 break;
5307 default:
5308 output_operand_lossage ("unknown relocation unspec");
5309 break;
5310 }
5311 }
5312 else
5313 output_addr_const (file, x);
5314 break;
5315
5316 default:
5317 output_operand_lossage ("invalid %%xn code");
5318 }
5319 }
5320
5321 void
5322 print_operand_address (FILE *file, rtx addr)
5323 {
5324 int basereg = 31;
5325 HOST_WIDE_INT offset = 0;
5326
5327 if (GET_CODE (addr) == AND)
5328 addr = XEXP (addr, 0);
5329
5330 if (GET_CODE (addr) == PLUS
5331 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
5332 {
5333 offset = INTVAL (XEXP (addr, 1));
5334 addr = XEXP (addr, 0);
5335 }
5336
5337 if (GET_CODE (addr) == LO_SUM)
5338 {
5339 const char *reloc16, *reloclo;
5340 rtx op1 = XEXP (addr, 1);
5341
5342 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5343 {
5344 op1 = XEXP (op1, 0);
5345 switch (XINT (op1, 1))
5346 {
5347 case UNSPEC_DTPREL:
5348 reloc16 = NULL;
5349 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5350 break;
5351 case UNSPEC_TPREL:
5352 reloc16 = NULL;
5353 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5354 break;
5355 default:
5356 output_operand_lossage ("unknown relocation unspec");
5357 return;
5358 }
5359
5360 output_addr_const (file, XVECEXP (op1, 0, 0));
5361 }
5362 else
5363 {
5364 reloc16 = "gprel";
5365 reloclo = "gprellow";
5366 output_addr_const (file, op1);
5367 }
5368
5369 if (offset)
5370 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5371
5372 addr = XEXP (addr, 0);
5373 switch (GET_CODE (addr))
5374 {
5375 case REG:
5376 basereg = REGNO (addr);
5377 break;
5378
5379 case SUBREG:
5380 basereg = subreg_regno (addr);
5381 break;
5382
5383 default:
5384 gcc_unreachable ();
5385 }
5386
5387 fprintf (file, "($%d)\t\t!%s", basereg,
5388 (basereg == 29 ? reloc16 : reloclo));
5389 return;
5390 }
5391
5392 switch (GET_CODE (addr))
5393 {
5394 case REG:
5395 basereg = REGNO (addr);
5396 break;
5397
5398 case SUBREG:
5399 basereg = subreg_regno (addr);
5400 break;
5401
5402 case CONST_INT:
5403 offset = INTVAL (addr);
5404 break;
5405
5406 #if TARGET_ABI_OPEN_VMS
5407 case SYMBOL_REF:
5408 fprintf (file, "%s", XSTR (addr, 0));
5409 return;
5410
5411 case CONST:
5412 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5413 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5414 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5415 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5416 INTVAL (XEXP (XEXP (addr, 0), 1)));
5417 return;
5418
5419 #endif
5420 default:
5421 gcc_unreachable ();
5422 }
5423
5424 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5425 }
5426 \f
5427 /* Emit RTL insns to initialize the variable parts of a trampoline at
5428 TRAMP. FNADDR is an RTX for the address of the function's pure
5429 code. CXT is an RTX for the static chain value for the function.
5430
5431 The three offset parameters are for the individual template's
5432 layout. A JMPOFS < 0 indicates that the trampoline does not
5433 contain instructions at all.
5434
5435 We assume here that a function will be called many more times than
5436 its address is taken (e.g., it might be passed to qsort), so we
5437 take the trouble to initialize the "hint" field in the JMP insn.
5438 Note that the hint field is PC (new) + 4 * bits 13:0. */
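/* Concretely, the (currently disabled) code below would compute the hint
   as ((FNADDR - (TRAMP + JMPOFS + 4)) >> 2) & 0x3fff and OR it into the
   low 14 bits of the JMP instruction word.  */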
5439
5440 void
5441 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5442 int fnofs, int cxtofs, int jmpofs)
5443 {
5444 rtx temp, temp1, addr;
5445 /* VMS really uses DImode pointers in memory at this point. */
5446 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5447
5448 #ifdef POINTERS_EXTEND_UNSIGNED
5449 fnaddr = convert_memory_address (mode, fnaddr);
5450 cxt = convert_memory_address (mode, cxt);
5451 #endif
5452
5453 /* Store function address and CXT. */
5454 addr = memory_address (mode, plus_constant (tramp, fnofs));
5455 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5456 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5457 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5458
5459 /* This has been disabled since the hint only has a 32k range, and in
5460 no existing OS is the stack within 32k of the text segment. */
5461 if (0 && jmpofs >= 0)
5462 {
5463 /* Compute hint value. */
5464 temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
5465 temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
5466 OPTAB_WIDEN);
5467 temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
5468 build_int_cst (NULL_TREE, 2), NULL_RTX, 1);
5469 temp = expand_and (SImode, gen_lowpart (SImode, temp),
5470 GEN_INT (0x3fff), 0);
5471
5472 /* Merge in the hint. */
5473 addr = memory_address (SImode, plus_constant (tramp, jmpofs));
5474 temp1 = force_reg (SImode, gen_rtx_MEM (SImode, addr));
5475 temp1 = expand_and (SImode, temp1, GEN_INT (0xffffc000), NULL_RTX);
5476 temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
5477 OPTAB_WIDEN);
5478 emit_move_insn (gen_rtx_MEM (SImode, addr), temp1);
5479 }
5480
5481 #ifdef ENABLE_EXECUTE_STACK
5482 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5483 0, VOIDmode, 1, tramp, Pmode);
5484 #endif
5485
5486 if (jmpofs >= 0)
5487 emit_insn (gen_imb ());
5488 }
5489 \f
5490 /* Determine where to put an argument to a function.
5491 Value is zero to push the argument on the stack,
5492 or a hard register in which to store the argument.
5493
5494 MODE is the argument's machine mode.
5495 TYPE is the data type of the argument (as a tree).
5496 This is null for libcalls where that information may
5497 not be available.
5498 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5499 the preceding args and about the function being called.
5500 NAMED is nonzero if this argument is a named parameter
5501 (otherwise it is an extra parameter matching an ellipsis).
5502
5503 On Alpha the first 6 words of args are normally in registers
5504 and the rest are pushed. */
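/* For example, on OSF the six argument slots map to $16-$21 for integer
   and aggregate arguments (basereg 16) and to $f16-$f21 for floating-point
   scalars (basereg 32 + 16).  */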
5505
5506 rtx
5507 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5508 int named ATTRIBUTE_UNUSED)
5509 {
5510 int basereg;
5511 int num_args;
5512
5513 /* Don't get confused and pass small structures in FP registers. */
5514 if (type && AGGREGATE_TYPE_P (type))
5515 basereg = 16;
5516 else
5517 {
5518 #ifdef ENABLE_CHECKING
5519 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5520 values here. */
5521 gcc_assert (!COMPLEX_MODE_P (mode));
5522 #endif
5523
5524 /* Set up defaults for FP operands passed in FP registers, and
5525 integral operands passed in integer registers. */
5526 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5527 basereg = 32 + 16;
5528 else
5529 basereg = 16;
5530 }
5531
5532 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5533 the three platforms, so we can't avoid conditional compilation. */
5534 #if TARGET_ABI_OPEN_VMS
5535 {
5536 if (mode == VOIDmode)
5537 return alpha_arg_info_reg_val (cum);
5538
5539 num_args = cum.num_args;
5540 if (num_args >= 6
5541 || targetm.calls.must_pass_in_stack (mode, type))
5542 return NULL_RTX;
5543 }
5544 #elif TARGET_ABI_UNICOSMK
5545 {
5546 int size;
5547
5548 /* If this is the last argument, generate the call info word (CIW). */
5549 /* ??? We don't include the caller's line number in the CIW because
5550 I don't know how to determine it if debug info is turned off. */
5551 if (mode == VOIDmode)
5552 {
5553 int i;
5554 HOST_WIDE_INT lo;
5555 HOST_WIDE_INT hi;
5556 rtx ciw;
5557
5558 lo = 0;
5559
5560 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5561 if (cum.reg_args_type[i])
5562 lo |= (1 << (7 - i));
5563
5564 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5565 lo |= 7;
5566 else
5567 lo |= cum.num_reg_words;
5568
5569 #if HOST_BITS_PER_WIDE_INT == 32
5570 hi = (cum.num_args << 20) | cum.num_arg_words;
5571 #else
5572 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5573 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5574 hi = 0;
5575 #endif
5576 ciw = immed_double_const (lo, hi, DImode);
5577
5578 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5579 UNSPEC_UMK_LOAD_CIW);
5580 }
5581
5582 size = ALPHA_ARG_SIZE (mode, type, named);
5583 num_args = cum.num_reg_words;
5584 if (cum.force_stack
5585 || cum.num_reg_words + size > 6
5586 || targetm.calls.must_pass_in_stack (mode, type))
5587 return NULL_RTX;
5588 else if (type && TYPE_MODE (type) == BLKmode)
5589 {
5590 rtx reg1, reg2;
5591
5592 reg1 = gen_rtx_REG (DImode, num_args + 16);
5593 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5594
5595 /* The argument fits in two registers. Note that we still need to
5596 reserve a register for empty structures. */
5597 if (size == 0)
5598 return NULL_RTX;
5599 else if (size == 1)
5600 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5601 else
5602 {
5603 reg2 = gen_rtx_REG (DImode, num_args + 17);
5604 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5605 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5606 }
5607 }
5608 }
5609 #elif TARGET_ABI_OSF
5610 {
5611 if (cum >= 6)
5612 return NULL_RTX;
5613 num_args = cum;
5614
5615 /* VOID is passed as a special flag for "last argument". */
5616 if (type == void_type_node)
5617 basereg = 16;
5618 else if (targetm.calls.must_pass_in_stack (mode, type))
5619 return NULL_RTX;
5620 }
5621 #else
5622 #error Unhandled ABI
5623 #endif
5624
5625 return gen_rtx_REG (mode, num_args + basereg);
5626 }
5627
5628 static int
5629 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5630 enum machine_mode mode ATTRIBUTE_UNUSED,
5631 tree type ATTRIBUTE_UNUSED,
5632 bool named ATTRIBUTE_UNUSED)
5633 {
5634 int words = 0;
5635
5636 #if TARGET_ABI_OPEN_VMS
5637 if (cum->num_args < 6
5638 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5639 words = 6 - cum->num_args;
5640 #elif TARGET_ABI_UNICOSMK
5641 /* Never any split arguments. */
5642 #elif TARGET_ABI_OSF
5643 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5644 words = 6 - *cum;
5645 #else
5646 #error Unhandled ABI
5647 #endif
5648
5649 return words * UNITS_PER_WORD;
5650 }
5651
5652
5653 /* Return true if TYPE must be returned in memory, instead of in registers. */
5654
5655 static bool
5656 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5657 {
5658 enum machine_mode mode = VOIDmode;
5659 int size;
5660
5661 if (type)
5662 {
5663 mode = TYPE_MODE (type);
5664
5665 /* All aggregates are returned in memory. */
5666 if (AGGREGATE_TYPE_P (type))
5667 return true;
5668 }
5669
5670 size = GET_MODE_SIZE (mode);
5671 switch (GET_MODE_CLASS (mode))
5672 {
5673 case MODE_VECTOR_FLOAT:
5674 /* Pass all float vectors in memory, like an aggregate. */
5675 return true;
5676
5677 case MODE_COMPLEX_FLOAT:
5678 /* We judge complex floats on the size of their element,
5679 not the size of the whole type. */
5680 size = GET_MODE_UNIT_SIZE (mode);
5681 break;
5682
5683 case MODE_INT:
5684 case MODE_FLOAT:
5685 case MODE_COMPLEX_INT:
5686 case MODE_VECTOR_INT:
5687 break;
5688
5689 default:
5690 /* ??? We get called on all sorts of random stuff from
5691 aggregate_value_p. We must return something, but it's not
5692 clear what's safe to return. Pretend it's a struct I
5693 guess. */
5694 return true;
5695 }
5696
5697 /* Otherwise types must fit in one register. */
5698 return size > UNITS_PER_WORD;
5699 }
5700
5701 /* Return true if TYPE should be passed by invisible reference. */
5702
5703 static bool
5704 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5705 enum machine_mode mode,
5706 const_tree type ATTRIBUTE_UNUSED,
5707 bool named ATTRIBUTE_UNUSED)
5708 {
5709 return mode == TFmode || mode == TCmode;
5710 }
5711
5712 /* Define how to find the value returned by a function. VALTYPE is the
5713 data type of the value (as a tree). If the precise function being
5714 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5715 MODE is set instead of VALTYPE for libcalls.
5716
5717 On Alpha the value is found in $0 for integer functions and
5718 $f0 for floating-point functions. */
5719
5720 rtx
5721 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5722 enum machine_mode mode)
5723 {
5724 unsigned int regnum, dummy;
5725 enum mode_class class;
5726
5727 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5728
5729 if (valtype)
5730 mode = TYPE_MODE (valtype);
5731
5732 class = GET_MODE_CLASS (mode);
5733 switch (class)
5734 {
5735 case MODE_INT:
5736 PROMOTE_MODE (mode, dummy, valtype);
5737 /* FALLTHRU */
5738
5739 case MODE_COMPLEX_INT:
5740 case MODE_VECTOR_INT:
5741 regnum = 0;
5742 break;
5743
5744 case MODE_FLOAT:
5745 regnum = 32;
5746 break;
5747
5748 case MODE_COMPLEX_FLOAT:
5749 {
5750 enum machine_mode cmode = GET_MODE_INNER (mode);
5751
5752 return gen_rtx_PARALLEL
5753 (VOIDmode,
5754 gen_rtvec (2,
5755 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5756 const0_rtx),
5757 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5758 GEN_INT (GET_MODE_SIZE (cmode)))));
5759 }
5760
5761 default:
5762 gcc_unreachable ();
5763 }
5764
5765 return gen_rtx_REG (mode, regnum);
5766 }
5767
5768 /* TCmode complex values are passed by invisible reference. We
5769 should not split these values. */
5770
5771 static bool
5772 alpha_split_complex_arg (const_tree type)
5773 {
5774 return TYPE_MODE (type) != TCmode;
5775 }
5776
5777 static tree
5778 alpha_build_builtin_va_list (void)
5779 {
5780 tree base, ofs, space, record, type_decl;
5781
5782 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5783 return ptr_type_node;
5784
5785 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5786 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
5787 TREE_CHAIN (record) = type_decl;
5788 TYPE_NAME (record) = type_decl;
5789
5790 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5791
5792 /* Dummy field to prevent alignment warnings. */
5793 space = build_decl (FIELD_DECL, NULL_TREE, integer_type_node);
5794 DECL_FIELD_CONTEXT (space) = record;
5795 DECL_ARTIFICIAL (space) = 1;
5796 DECL_IGNORED_P (space) = 1;
5797
5798 ofs = build_decl (FIELD_DECL, get_identifier ("__offset"),
5799 integer_type_node);
5800 DECL_FIELD_CONTEXT (ofs) = record;
5801 TREE_CHAIN (ofs) = space;
5802
5803 base = build_decl (FIELD_DECL, get_identifier ("__base"),
5804 ptr_type_node);
5805 DECL_FIELD_CONTEXT (base) = record;
5806 TREE_CHAIN (base) = ofs;
5807
5808 TYPE_FIELDS (record) = base;
5809 layout_type (record);
5810
5811 va_list_gpr_counter_field = ofs;
5812 return record;
5813 }
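/* The record built above is thus roughly equivalent to

	struct __va_list_tag
	{
	  void *__base;
	  int __offset;
	  int __space;
	};

   where __space stands in for the unnamed dummy field added above to
   silence alignment warnings.  VMS and Unicos/Mk use a plain pointer
   type instead.  */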
5814
5815 #if TARGET_ABI_OSF
5816 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5817 and constant additions. */
5818
5819 static tree
5820 va_list_skip_additions (tree lhs)
5821 {
5822 tree rhs, stmt;
5823
5824 if (TREE_CODE (lhs) != SSA_NAME)
5825 return lhs;
5826
5827 for (;;)
5828 {
5829 stmt = SSA_NAME_DEF_STMT (lhs);
5830
5831 if (TREE_CODE (stmt) == PHI_NODE)
5832 return stmt;
5833
5834 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT
5835 || GIMPLE_STMT_OPERAND (stmt, 0) != lhs)
5836 return lhs;
5837
5838 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
5839 if (TREE_CODE (rhs) == WITH_SIZE_EXPR)
5840 rhs = TREE_OPERAND (rhs, 0);
5841
5842 if ((TREE_CODE (rhs) != NOP_EXPR
5843 && TREE_CODE (rhs) != CONVERT_EXPR
5844 && ((TREE_CODE (rhs) != PLUS_EXPR
5845 && TREE_CODE (rhs) != POINTER_PLUS_EXPR)
5846 || TREE_CODE (TREE_OPERAND (rhs, 1)) != INTEGER_CST
5847 || !host_integerp (TREE_OPERAND (rhs, 1), 1)))
5848 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5849 return rhs;
5850
5851 lhs = TREE_OPERAND (rhs, 0);
5852 }
5853 }
5854
5855 /* Check if LHS = RHS statement is
5856 LHS = *(ap.__base + ap.__offset + cst)
5857 or
5858 LHS = *(ap.__base
5859 + ((ap.__offset + cst <= 47)
5860 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5861 If the former, indicate that GPR registers are needed,
5862 if the latter, indicate that FPR registers are needed.
5863
5864 Also look for LHS = (*ptr).field, where ptr is one of the forms
5865 listed above.
5866
5867 On alpha, cfun->va_list_gpr_size is used as size of the needed
5868 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5869 registers are needed and bit 1 set if FPR registers are needed.
5870 Return true if va_list references should not be scanned for the
5871 current statement. */
5872
5873 static bool
5874 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_tree lhs, const_tree rhs)
5875 {
5876 tree base, offset, arg1, arg2;
5877 int offset_arg = 1;
5878
5879 while (handled_component_p (rhs))
5880 rhs = TREE_OPERAND (rhs, 0);
5881 if (TREE_CODE (rhs) != INDIRECT_REF
5882 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5883 return false;
5884
5885 lhs = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5886 if (lhs == NULL_TREE
5887 || TREE_CODE (lhs) != POINTER_PLUS_EXPR)
5888 return false;
5889
5890 base = TREE_OPERAND (lhs, 0);
5891 if (TREE_CODE (base) == SSA_NAME)
5892 base = va_list_skip_additions (base);
5893
5894 if (TREE_CODE (base) != COMPONENT_REF
5895 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5896 {
5897 base = TREE_OPERAND (lhs, 0);
5898 if (TREE_CODE (base) == SSA_NAME)
5899 base = va_list_skip_additions (base);
5900
5901 if (TREE_CODE (base) != COMPONENT_REF
5902 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5903 return false;
5904
5905 offset_arg = 0;
5906 }
5907
5908 base = get_base_address (base);
5909 if (TREE_CODE (base) != VAR_DECL
5910 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5911 return false;
5912
5913 offset = TREE_OPERAND (lhs, offset_arg);
5914 if (TREE_CODE (offset) == SSA_NAME)
5915 offset = va_list_skip_additions (offset);
5916
5917 if (TREE_CODE (offset) == PHI_NODE)
5918 {
5919 HOST_WIDE_INT sub;
5920
5921 if (PHI_NUM_ARGS (offset) != 2)
5922 goto escapes;
5923
5924 arg1 = va_list_skip_additions (PHI_ARG_DEF (offset, 0));
5925 arg2 = va_list_skip_additions (PHI_ARG_DEF (offset, 1));
5926 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5927 {
5928 tree tem = arg1;
5929 arg1 = arg2;
5930 arg2 = tem;
5931
5932 if (TREE_CODE (arg2) != MINUS_EXPR && TREE_CODE (arg2) != PLUS_EXPR)
5933 goto escapes;
5934 }
5935 if (!host_integerp (TREE_OPERAND (arg2, 1), 0))
5936 goto escapes;
5937
5938 sub = tree_low_cst (TREE_OPERAND (arg2, 1), 0);
5939 if (TREE_CODE (arg2) == MINUS_EXPR)
5940 sub = -sub;
5941 if (sub < -48 || sub > -32)
5942 goto escapes;
5943
5944 arg2 = va_list_skip_additions (TREE_OPERAND (arg2, 0));
5945 if (arg1 != arg2)
5946 goto escapes;
5947
5948 if (TREE_CODE (arg1) == SSA_NAME)
5949 arg1 = va_list_skip_additions (arg1);
5950
5951 if (TREE_CODE (arg1) != COMPONENT_REF
5952 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
5953 || get_base_address (arg1) != base)
5954 goto escapes;
5955
5956 /* Need floating point regs. */
5957 cfun->va_list_fpr_size |= 2;
5958 }
5959 else if (TREE_CODE (offset) != COMPONENT_REF
5960 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
5961 || get_base_address (offset) != base)
5962 goto escapes;
5963 else
5964 /* Need general regs. */
5965 cfun->va_list_fpr_size |= 1;
5966 return false;
5967
5968 escapes:
5969 si->va_list_escapes = true;
5970 return false;
5971 }
5972 #endif
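/* Illustration of what the hook above is for (hypothetical user code,
   not part of this file, assuming <stdarg.h>): a function like

     int
     sum_ints (int n, ...)
     {
       va_list ap;
       int i, s = 0;
       va_start (ap, n);
       for (i = 0; i < n; i++)
         s += va_arg (ap, int);
       va_end (ap);
       return s;
     }

   only reads integer va_arg values, so the matching above should leave
   only bit 0 of cfun->va_list_fpr_size set, and the code in
   alpha_setup_incoming_varargs can then skip dumping the FP argument
   registers.  This is only a sketch of the intent; the precise GIMPLE
   forms recognized are those listed in the comment before the hook.  */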
5973
5974 /* Perform any actions needed for a function that is receiving a
5975 variable number of arguments. */
5976
5977 static void
5978 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
5979 tree type, int *pretend_size, int no_rtl)
5980 {
5981 CUMULATIVE_ARGS cum = *pcum;
5982
5983 /* Skip the current argument. */
5984 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
5985
5986 #if TARGET_ABI_UNICOSMK
5987 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
5988 arguments on the stack. Unfortunately, it doesn't always store the first
5989 one (i.e. the one that arrives in $16 or $f16). This is not a problem
5990 with stdargs as we always have at least one named argument there. */
5991 if (cum.num_reg_words < 6)
5992 {
5993 if (!no_rtl)
5994 {
5995 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
5996 emit_insn (gen_arg_home_umk ());
5997 }
5998 *pretend_size = 0;
5999 }
6000 #elif TARGET_ABI_OPEN_VMS
6001 /* For VMS, we allocate space for all 6 arg registers plus a count.
6002
6003 However, if NO registers need to be saved, don't allocate any space.
6004 This is not only because we won't need the space, but because AP
6005 includes the current_pretend_args_size and we don't want to mess up
6006 any ap-relative addresses already made. */
6007 if (cum.num_args < 6)
6008 {
6009 if (!no_rtl)
6010 {
6011 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6012 emit_insn (gen_arg_home ());
6013 }
6014 *pretend_size = 7 * UNITS_PER_WORD;
6015 }
6016 #else
6017 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6018 only push those that remain. However, if NO registers need to
6019 be saved, don't allocate any space. This is not only because we won't
6020 need the space, but because AP includes the current_pretend_args_size
6021 and we don't want to mess up any ap-relative addresses already made.
6022
6023 If we are not to use the floating-point registers, save the integer
6024 registers where we would put the floating-point registers. This is
6025 not the most efficient way to implement varargs with just one register
6026 class, but it isn't worth doing anything more efficient in this rare
6027 case. */
6028 if (cum >= 6)
6029 return;
6030
6031 if (!no_rtl)
6032 {
6033 int count;
6034 alias_set_type set = get_varargs_alias_set ();
6035 rtx tmp;
6036
6037 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6038 if (count > 6 - cum)
6039 count = 6 - cum;
6040
6041 /* Detect whether integer registers or floating-point registers
6042 are needed by the detected va_arg statements. See above for
6043 how these values are computed. Note that the "escape" value
6044 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6045 these bits set. */
6046 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6047
6048 if (cfun->va_list_fpr_size & 1)
6049 {
6050 tmp = gen_rtx_MEM (BLKmode,
6051 plus_constant (virtual_incoming_args_rtx,
6052 (cum + 6) * UNITS_PER_WORD));
6053 MEM_NOTRAP_P (tmp) = 1;
6054 set_mem_alias_set (tmp, set);
6055 move_block_from_reg (16 + cum, tmp, count);
6056 }
6057
6058 if (cfun->va_list_fpr_size & 2)
6059 {
6060 tmp = gen_rtx_MEM (BLKmode,
6061 plus_constant (virtual_incoming_args_rtx,
6062 cum * UNITS_PER_WORD));
6063 MEM_NOTRAP_P (tmp) = 1;
6064 set_mem_alias_set (tmp, set);
6065 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6066 }
6067 }
6068 *pretend_size = 12 * UNITS_PER_WORD;
6069 #endif
6070 }
6071
6072 void
6073 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6074 {
6075 HOST_WIDE_INT offset;
6076 tree t, offset_field, base_field;
6077
6078 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6079 return;
6080
6081 if (TARGET_ABI_UNICOSMK)
6082 std_expand_builtin_va_start (valist, nextarg);
6083
6084 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6085 up by 48, storing fp arg registers in the first 48 bytes, and the
6086 integer arg registers in the next 48 bytes. This is only done,
6087 however, if any integer registers need to be stored.
6088
6089 If no integer registers need be stored, then we must subtract 48
6090 in order to account for the integer arg registers which are counted
6091 in argsize above, but which are not actually stored on the stack.
6092 Must further be careful here about structures straddling the last
6093 integer argument register; that futzes with pretend_args_size,
6094 which changes the meaning of AP. */
6095
6096 if (NUM_ARGS < 6)
6097 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6098 else
6099 offset = -6 * UNITS_PER_WORD + current_function_pretend_args_size;
6100
6101 if (TARGET_ABI_OPEN_VMS)
6102 {
6103 nextarg = plus_constant (nextarg, offset);
6104 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6105 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (valist), valist,
6106 make_tree (ptr_type_node, nextarg));
6107 TREE_SIDE_EFFECTS (t) = 1;
6108
6109 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6110 }
6111 else
6112 {
6113 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6114 offset_field = TREE_CHAIN (base_field);
6115
6116 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6117 valist, base_field, NULL_TREE);
6118 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6119 valist, offset_field, NULL_TREE);
6120
6121 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6122 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6123 size_int (offset));
6124 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (base_field), base_field, t);
6125 TREE_SIDE_EFFECTS (t) = 1;
6126 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6127
6128 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6129 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset_field),
6130 offset_field, t);
6131 TREE_SIDE_EFFECTS (t) = 1;
6132 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6133 }
6134 }
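/* Rough picture of the resulting OSF/1 va_list (a sketch; the two-field
   { __base, __offset } type itself is built elsewhere in this file):

     __base - 48 .. __base - 1 : saved FP argument registers $f16-$f21
     __base +  0 .. __base + 47: saved integer argument registers $16-$21
     __base + 48 ..            : the caller's stack arguments
     __offset                  : 8 * NUM_ARGS, so named arguments are skipped

   alpha_gimplify_va_arg_1 below then reads an argument from roughly
   *(__base + (FP type && __offset < 48 ? __offset - 48 : __offset)) and
   advances __offset by the argument size rounded up to a multiple of 8.  */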
6135
6136 static tree
6137 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset, tree *pre_p)
6138 {
6139 tree type_size, ptr_type, addend, t, addr, internal_post;
6140
6141 /* If the type could not be passed in registers, skip the block
6142 reserved for the registers. */
6143 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6144 {
6145 t = build_int_cst (TREE_TYPE (offset), 6*8);
6146 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (offset), offset,
6147 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t));
6148 gimplify_and_add (t, pre_p);
6149 }
6150
6151 addend = offset;
6152 ptr_type = build_pointer_type (type);
6153
6154 if (TREE_CODE (type) == COMPLEX_TYPE)
6155 {
6156 tree real_part, imag_part, real_temp;
6157
6158 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6159 offset, pre_p);
6160
6161 /* Copy the value into a new temporary, lest the formal temporary
6162 be reused out from under us. */
6163 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6164
6165 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6166 offset, pre_p);
6167
6168 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6169 }
6170 else if (TREE_CODE (type) == REAL_TYPE)
6171 {
6172 tree fpaddend, cond, fourtyeight;
6173
6174 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6175 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6176 addend, fourtyeight);
6177 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6178 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6179 fpaddend, addend);
6180 }
6181
6182 /* Build the final address and force that value into a temporary. */
6183 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6184 fold_convert (sizetype, addend));
6185 internal_post = NULL;
6186 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6187 append_to_statement_list (internal_post, pre_p);
6188
6189 /* Update the offset field. */
6190 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6191 if (type_size == NULL || TREE_OVERFLOW (type_size))
6192 t = size_zero_node;
6193 else
6194 {
6195 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6196 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6197 t = size_binop (MULT_EXPR, t, size_int (8));
6198 }
6199 t = fold_convert (TREE_TYPE (offset), t);
6200 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset,
6201 build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t));
6202 gimplify_and_add (t, pre_p);
6203
6204 return build_va_arg_indirect_ref (addr);
6205 }
6206
6207 static tree
6208 alpha_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6209 {
6210 tree offset_field, base_field, offset, base, t, r;
6211 bool indirect;
6212
6213 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6214 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6215
6216 base_field = TYPE_FIELDS (va_list_type_node);
6217 offset_field = TREE_CHAIN (base_field);
6218 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6219 valist, base_field, NULL_TREE);
6220 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6221 valist, offset_field, NULL_TREE);
6222
6223 /* Pull the fields of the structure out into temporaries. Since we never
6224 modify the base field, we can use a formal temporary. Sign-extend the
6225 offset field so that it's the proper width for pointer arithmetic. */
6226 base = get_formal_tmp_var (base_field, pre_p);
6227
6228 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6229 offset = get_initialized_tmp_var (t, pre_p, NULL);
6230
6231 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6232 if (indirect)
6233 type = build_pointer_type (type);
6234
6235 /* Find the value. Note that this will be a stable indirection, or
6236 a composite of stable indirections in the case of complex. */
6237 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6238
6239 /* Stuff the offset temporary back into its field. */
6240 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, offset_field,
6241 fold_convert (TREE_TYPE (offset_field), offset));
6242 gimplify_and_add (t, pre_p);
6243
6244 if (indirect)
6245 r = build_va_arg_indirect_ref (r);
6246
6247 return r;
6248 }
6249 \f
6250 /* Builtins. */
6251
6252 enum alpha_builtin
6253 {
6254 ALPHA_BUILTIN_CMPBGE,
6255 ALPHA_BUILTIN_EXTBL,
6256 ALPHA_BUILTIN_EXTWL,
6257 ALPHA_BUILTIN_EXTLL,
6258 ALPHA_BUILTIN_EXTQL,
6259 ALPHA_BUILTIN_EXTWH,
6260 ALPHA_BUILTIN_EXTLH,
6261 ALPHA_BUILTIN_EXTQH,
6262 ALPHA_BUILTIN_INSBL,
6263 ALPHA_BUILTIN_INSWL,
6264 ALPHA_BUILTIN_INSLL,
6265 ALPHA_BUILTIN_INSQL,
6266 ALPHA_BUILTIN_INSWH,
6267 ALPHA_BUILTIN_INSLH,
6268 ALPHA_BUILTIN_INSQH,
6269 ALPHA_BUILTIN_MSKBL,
6270 ALPHA_BUILTIN_MSKWL,
6271 ALPHA_BUILTIN_MSKLL,
6272 ALPHA_BUILTIN_MSKQL,
6273 ALPHA_BUILTIN_MSKWH,
6274 ALPHA_BUILTIN_MSKLH,
6275 ALPHA_BUILTIN_MSKQH,
6276 ALPHA_BUILTIN_UMULH,
6277 ALPHA_BUILTIN_ZAP,
6278 ALPHA_BUILTIN_ZAPNOT,
6279 ALPHA_BUILTIN_AMASK,
6280 ALPHA_BUILTIN_IMPLVER,
6281 ALPHA_BUILTIN_RPCC,
6282 ALPHA_BUILTIN_THREAD_POINTER,
6283 ALPHA_BUILTIN_SET_THREAD_POINTER,
6284
6285 /* TARGET_MAX */
6286 ALPHA_BUILTIN_MINUB8,
6287 ALPHA_BUILTIN_MINSB8,
6288 ALPHA_BUILTIN_MINUW4,
6289 ALPHA_BUILTIN_MINSW4,
6290 ALPHA_BUILTIN_MAXUB8,
6291 ALPHA_BUILTIN_MAXSB8,
6292 ALPHA_BUILTIN_MAXUW4,
6293 ALPHA_BUILTIN_MAXSW4,
6294 ALPHA_BUILTIN_PERR,
6295 ALPHA_BUILTIN_PKLB,
6296 ALPHA_BUILTIN_PKWB,
6297 ALPHA_BUILTIN_UNPKBL,
6298 ALPHA_BUILTIN_UNPKBW,
6299
6300 /* TARGET_CIX */
6301 ALPHA_BUILTIN_CTTZ,
6302 ALPHA_BUILTIN_CTLZ,
6303 ALPHA_BUILTIN_CTPOP,
6304
6305 ALPHA_BUILTIN_max
6306 };
6307
6308 static unsigned int const code_for_builtin[ALPHA_BUILTIN_max] = {
6309 CODE_FOR_builtin_cmpbge,
6310 CODE_FOR_builtin_extbl,
6311 CODE_FOR_builtin_extwl,
6312 CODE_FOR_builtin_extll,
6313 CODE_FOR_builtin_extql,
6314 CODE_FOR_builtin_extwh,
6315 CODE_FOR_builtin_extlh,
6316 CODE_FOR_builtin_extqh,
6317 CODE_FOR_builtin_insbl,
6318 CODE_FOR_builtin_inswl,
6319 CODE_FOR_builtin_insll,
6320 CODE_FOR_builtin_insql,
6321 CODE_FOR_builtin_inswh,
6322 CODE_FOR_builtin_inslh,
6323 CODE_FOR_builtin_insqh,
6324 CODE_FOR_builtin_mskbl,
6325 CODE_FOR_builtin_mskwl,
6326 CODE_FOR_builtin_mskll,
6327 CODE_FOR_builtin_mskql,
6328 CODE_FOR_builtin_mskwh,
6329 CODE_FOR_builtin_msklh,
6330 CODE_FOR_builtin_mskqh,
6331 CODE_FOR_umuldi3_highpart,
6332 CODE_FOR_builtin_zap,
6333 CODE_FOR_builtin_zapnot,
6334 CODE_FOR_builtin_amask,
6335 CODE_FOR_builtin_implver,
6336 CODE_FOR_builtin_rpcc,
6337 CODE_FOR_load_tp,
6338 CODE_FOR_set_tp,
6339
6340 /* TARGET_MAX */
6341 CODE_FOR_builtin_minub8,
6342 CODE_FOR_builtin_minsb8,
6343 CODE_FOR_builtin_minuw4,
6344 CODE_FOR_builtin_minsw4,
6345 CODE_FOR_builtin_maxub8,
6346 CODE_FOR_builtin_maxsb8,
6347 CODE_FOR_builtin_maxuw4,
6348 CODE_FOR_builtin_maxsw4,
6349 CODE_FOR_builtin_perr,
6350 CODE_FOR_builtin_pklb,
6351 CODE_FOR_builtin_pkwb,
6352 CODE_FOR_builtin_unpkbl,
6353 CODE_FOR_builtin_unpkbw,
6354
6355 /* TARGET_CIX */
6356 CODE_FOR_ctzdi2,
6357 CODE_FOR_clzdi2,
6358 CODE_FOR_popcountdi2
6359 };
6360
6361 struct alpha_builtin_def
6362 {
6363 const char *name;
6364 enum alpha_builtin code;
6365 unsigned int target_mask;
6366 bool is_const;
6367 };
6368
6369 static struct alpha_builtin_def const zero_arg_builtins[] = {
6370 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6371 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6372 };
6373
6374 static struct alpha_builtin_def const one_arg_builtins[] = {
6375 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6376 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6377 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6378 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6379 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6380 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6381 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6382 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6383 };
6384
6385 static struct alpha_builtin_def const two_arg_builtins[] = {
6386 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6387 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6388 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6389 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6390 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6391 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6392 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6393 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6394 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6395 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6396 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6397 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6398 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6399 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6400 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6401 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6402 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6403 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6404 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6405 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6406 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6407 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6408 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6409 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6410 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6411 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6412 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6413 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6414 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6415 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6416 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6417 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6418 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6419 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6420 };
6421
6422 static GTY(()) tree alpha_v8qi_u;
6423 static GTY(()) tree alpha_v8qi_s;
6424 static GTY(()) tree alpha_v4hi_u;
6425 static GTY(()) tree alpha_v4hi_s;
6426
6427 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6428 functions pointed to by P, with function type FTYPE. */
6429
6430 static void
6431 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6432 tree ftype)
6433 {
6434 tree decl;
6435 size_t i;
6436
6437 for (i = 0; i < count; ++i, ++p)
6438 if ((target_flags & p->target_mask) == p->target_mask)
6439 {
6440 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6441 NULL, NULL);
6442 if (p->is_const)
6443 TREE_READONLY (decl) = 1;
6444 TREE_NOTHROW (decl) = 1;
6445 }
6446 }
6447
6448
6449 static void
6450 alpha_init_builtins (void)
6451 {
6452 tree dimode_integer_type_node;
6453 tree ftype, decl;
6454
6455 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6456
6457 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6458 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6459 ftype);
6460
6461 ftype = build_function_type_list (dimode_integer_type_node,
6462 dimode_integer_type_node, NULL_TREE);
6463 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6464 ftype);
6465
6466 ftype = build_function_type_list (dimode_integer_type_node,
6467 dimode_integer_type_node,
6468 dimode_integer_type_node, NULL_TREE);
6469 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6470 ftype);
6471
6472 ftype = build_function_type (ptr_type_node, void_list_node);
6473 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6474 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6475 NULL, NULL);
6476 TREE_NOTHROW (decl) = 1;
6477
6478 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6479 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6480 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6481 NULL, NULL);
6482 TREE_NOTHROW (decl) = 1;
6483
6484 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6485 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6486 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6487 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6488 }
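/* Illustrative uses of the builtins declared above (hypothetical user
   code, not part of this file):

     long m = __builtin_alpha_cmpbge (x, y);    /+ per-byte unsigned x >= y +/
     long z = __builtin_alpha_zapnot (x, 0x0f); /+ keep only the low 4 bytes +/
     long h = __builtin_alpha_umulh (x, y);     /+ high 64 bits of x * y +/

   Each call normally expands through the insn codes in code_for_builtin;
   when the arguments are compile-time constants, alpha_fold_builtin below
   tries to fold the call to a constant instead.  */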
6489
6490 /* Expand an expression EXP that calls a built-in function,
6491 with result going to TARGET if that's convenient
6492 (and in mode MODE if that's convenient).
6493 SUBTARGET may be used as the target for computing one of EXP's operands.
6494 IGNORE is nonzero if the value is to be ignored. */
6495
6496 static rtx
6497 alpha_expand_builtin (tree exp, rtx target,
6498 rtx subtarget ATTRIBUTE_UNUSED,
6499 enum machine_mode mode ATTRIBUTE_UNUSED,
6500 int ignore ATTRIBUTE_UNUSED)
6501 {
6502 #define MAX_ARGS 2
6503
6504 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6505 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6506 tree arg;
6507 call_expr_arg_iterator iter;
6508 enum insn_code icode;
6509 rtx op[MAX_ARGS], pat;
6510 int arity;
6511 bool nonvoid;
6512
6513 if (fcode >= ALPHA_BUILTIN_max)
6514 internal_error ("bad builtin fcode");
6515 icode = code_for_builtin[fcode];
6516 if (icode == 0)
6517 internal_error ("bad builtin fcode");
6518
6519 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6520
6521 arity = 0;
6522 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6523 {
6524 const struct insn_operand_data *insn_op;
6525
6526 if (arg == error_mark_node)
6527 return NULL_RTX;
6528 if (arity >= MAX_ARGS)
6529 return NULL_RTX;
6530
6531 insn_op = &insn_data[icode].operand[arity + nonvoid];
6532
6533 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, 0);
6534
6535 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6536 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6537 arity++;
6538 }
6539
6540 if (nonvoid)
6541 {
6542 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6543 if (!target
6544 || GET_MODE (target) != tmode
6545 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6546 target = gen_reg_rtx (tmode);
6547 }
6548
6549 switch (arity)
6550 {
6551 case 0:
6552 pat = GEN_FCN (icode) (target);
6553 break;
6554 case 1:
6555 if (nonvoid)
6556 pat = GEN_FCN (icode) (target, op[0]);
6557 else
6558 pat = GEN_FCN (icode) (op[0]);
6559 break;
6560 case 2:
6561 pat = GEN_FCN (icode) (target, op[0], op[1]);
6562 break;
6563 default:
6564 gcc_unreachable ();
6565 }
6566 if (!pat)
6567 return NULL_RTX;
6568 emit_insn (pat);
6569
6570 if (nonvoid)
6571 return target;
6572 else
6573 return const0_rtx;
6574 }
6575
6576
6577 /* Several bits below assume HWI >= 64 bits. This should be enforced
6578 by config.gcc. */
6579 #if HOST_BITS_PER_WIDE_INT < 64
6580 # error "HOST_WIDE_INT too small"
6581 #endif
6582
6583 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6584 with an 8-bit output vector. OPINT contains the integer operands; bit N
6585 of OP_CONST is set if OPINT[N] is valid. */
6586
6587 static tree
6588 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6589 {
6590 if (op_const == 3)
6591 {
6592 int i, val;
6593 for (i = 0, val = 0; i < 8; ++i)
6594 {
6595 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6596 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6597 if (c0 >= c1)
6598 val |= 1 << i;
6599 }
6600 return build_int_cst (long_integer_type_node, val);
6601 }
6602 else if (op_const == 2 && opint[1] == 0)
6603 return build_int_cst (long_integer_type_node, 0xff);
6604 return NULL;
6605 }
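/* Worked example of the fold above (a sketch, not exercised by this file):
   with both operands constant,

     __builtin_alpha_cmpbge (0x0102030405060708, 0x0807060504030201)

   compares byte i of each operand as unsigned values; bytes 0..3 of the
   first operand (0x08, 0x07, 0x06, 0x05) are >= the corresponding bytes of
   the second operand, bytes 4..7 are not, so the call folds to 0x0f.  */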
6606
6607 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6608 specialized form of an AND operation. Other byte manipulation instructions
6609 are defined in terms of this instruction, so this is also used as a
6610 subroutine for other builtins.
6611
6612 OP contains the tree operands; OPINT contains the extracted integer values.
6613 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6614 OPINT is to be considered. */
6615
6616 static tree
6617 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6618 long op_const)
6619 {
6620 if (op_const & 2)
6621 {
6622 unsigned HOST_WIDE_INT mask = 0;
6623 int i;
6624
6625 for (i = 0; i < 8; ++i)
6626 if ((opint[1] >> i) & 1)
6627 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6628
6629 if (op_const & 1)
6630 return build_int_cst (long_integer_type_node, opint[0] & mask);
6631
6632 if (op)
6633 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6634 build_int_cst (long_integer_type_node, mask));
6635 }
6636 else if ((op_const & 1) && opint[0] == 0)
6637 return build_int_cst (long_integer_type_node, 0);
6638 return NULL;
6639 }
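/* Worked example (sketch): bit i of the second operand selects byte i of
   the first, so

     __builtin_alpha_zapnot (0x1122334455667788, 0x01)  ->  0x88
     __builtin_alpha_zapnot (x, 0x0f)                    ->  x & 0xffffffff

   the second case being exactly the BIT_AND_EXPR built above when only
   the mask operand is constant.  */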
6640
6641 /* Fold the builtins for the EXT family of instructions. */
6642
6643 static tree
6644 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6645 long op_const, unsigned HOST_WIDE_INT bytemask,
6646 bool is_high)
6647 {
6648 long zap_const = 2;
6649 tree *zap_op = NULL;
6650
6651 if (op_const & 2)
6652 {
6653 unsigned HOST_WIDE_INT loc;
6654
6655 loc = opint[1] & 7;
6656 if (BYTES_BIG_ENDIAN)
6657 loc ^= 7;
6658 loc *= 8;
6659
6660 if (loc != 0)
6661 {
6662 if (op_const & 1)
6663 {
6664 unsigned HOST_WIDE_INT temp = opint[0];
6665 if (is_high)
6666 temp <<= loc;
6667 else
6668 temp >>= loc;
6669 opint[0] = temp;
6670 zap_const = 3;
6671 }
6672 }
6673 else
6674 zap_op = op;
6675 }
6676
6677 opint[1] = bytemask;
6678 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6679 }
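/* Worked example (sketch): for the low-part extracts, operand 1 mod 8
   gives the starting byte, so

     __builtin_alpha_extwl (0x1122334455667788, 2)

   shifts the value right by 2*8 bits and then keeps the low two bytes
   (bytemask 0x03), folding to 0x5566.  */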
6680
6681 /* Fold the builtins for the INS family of instructions. */
6682
6683 static tree
6684 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6685 long op_const, unsigned HOST_WIDE_INT bytemask,
6686 bool is_high)
6687 {
6688 if ((op_const & 1) && opint[0] == 0)
6689 return build_int_cst (long_integer_type_node, 0);
6690
6691 if (op_const & 2)
6692 {
6693 unsigned HOST_WIDE_INT temp, loc, byteloc;
6694 tree *zap_op = NULL;
6695
6696 loc = opint[1] & 7;
6697 if (BYTES_BIG_ENDIAN)
6698 loc ^= 7;
6699 bytemask <<= loc;
6700
6701 temp = opint[0];
6702 if (is_high)
6703 {
6704 byteloc = (64 - (loc * 8)) & 0x3f;
6705 if (byteloc == 0)
6706 zap_op = op;
6707 else
6708 temp >>= byteloc;
6709 bytemask >>= 8;
6710 }
6711 else
6712 {
6713 byteloc = loc * 8;
6714 if (byteloc == 0)
6715 zap_op = op;
6716 else
6717 temp <<= byteloc;
6718 }
6719
6720 opint[0] = temp;
6721 opint[1] = bytemask;
6722 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6723 }
6724
6725 return NULL;
6726 }
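/* Worked example (sketch): the low-part inserts shift the value left by
   8 * (operand 1 mod 8) bits and shift the byte mask by the same amount,
   so

     __builtin_alpha_inswl (0x5566, 2)

   folds to 0x55660000, i.e. the two source bytes placed at byte
   positions 2 and 3.  */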
6727
6728 static tree
6729 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6730 long op_const, unsigned HOST_WIDE_INT bytemask,
6731 bool is_high)
6732 {
6733 if (op_const & 2)
6734 {
6735 unsigned HOST_WIDE_INT loc;
6736
6737 loc = opint[1] & 7;
6738 if (BYTES_BIG_ENDIAN)
6739 loc ^= 7;
6740 bytemask <<= loc;
6741
6742 if (is_high)
6743 bytemask >>= 8;
6744
6745 opint[1] = bytemask ^ 0xff;
6746 }
6747
6748 return alpha_fold_builtin_zapnot (op, opint, op_const);
6749 }
6750
6751 static tree
6752 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6753 {
6754 switch (op_const)
6755 {
6756 case 3:
6757 {
6758 unsigned HOST_WIDE_INT l;
6759 HOST_WIDE_INT h;
6760
6761 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6762
6763 #if HOST_BITS_PER_WIDE_INT > 64
6764 # error fixme
6765 #endif
6766
6767 return build_int_cst (long_integer_type_node, h);
6768 }
6769
6770 case 1:
6771 opint[1] = opint[0];
6772 /* FALLTHRU */
6773 case 2:
6774 /* Note that (X*1) >> 64 == 0. */
6775 if (opint[1] == 0 || opint[1] == 1)
6776 return build_int_cst (long_integer_type_node, 0);
6777 break;
6778 }
6779 return NULL;
6780 }
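/* Worked example (sketch): umulh yields bits 64..127 of the unsigned
   128-bit product, so

     __builtin_alpha_umulh (0xffffffffffffffff, 2)  ->  1
     __builtin_alpha_umulh (x, 1)                   ->  0

   the latter matching the (X*1) >> 64 == 0 note above.  */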
6781
6782 static tree
6783 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6784 {
6785 tree op0 = fold_convert (vtype, op[0]);
6786 tree op1 = fold_convert (vtype, op[1]);
6787 tree val = fold_build2 (code, vtype, op0, op1);
6788 return fold_convert (long_integer_type_node, val);
6789 }
6790
6791 static tree
6792 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6793 {
6794 unsigned HOST_WIDE_INT temp = 0;
6795 int i;
6796
6797 if (op_const != 3)
6798 return NULL;
6799
6800 for (i = 0; i < 8; ++i)
6801 {
6802 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6803 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6804 if (a >= b)
6805 temp += a - b;
6806 else
6807 temp += b - a;
6808 }
6809
6810 return build_int_cst (long_integer_type_node, temp);
6811 }
6812
6813 static tree
6814 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6815 {
6816 unsigned HOST_WIDE_INT temp;
6817
6818 if (op_const == 0)
6819 return NULL;
6820
6821 temp = opint[0] & 0xff;
6822 temp |= (opint[0] >> 24) & 0xff00;
6823
6824 return build_int_cst (long_integer_type_node, temp);
6825 }
6826
6827 static tree
6828 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6829 {
6830 unsigned HOST_WIDE_INT temp;
6831
6832 if (op_const == 0)
6833 return NULL;
6834
6835 temp = opint[0] & 0xff;
6836 temp |= (opint[0] >> 8) & 0xff00;
6837 temp |= (opint[0] >> 16) & 0xff0000;
6838 temp |= (opint[0] >> 24) & 0xff000000;
6839
6840 return build_int_cst (long_integer_type_node, temp);
6841 }
6842
6843 static tree
6844 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6845 {
6846 unsigned HOST_WIDE_INT temp;
6847
6848 if (op_const == 0)
6849 return NULL;
6850
6851 temp = opint[0] & 0xff;
6852 temp |= (opint[0] & 0xff00) << 24;
6853
6854 return build_int_cst (long_integer_type_node, temp);
6855 }
6856
6857 static tree
6858 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6859 {
6860 unsigned HOST_WIDE_INT temp;
6861
6862 if (op_const == 0)
6863 return NULL;
6864
6865 temp = opint[0] & 0xff;
6866 temp |= (opint[0] & 0x0000ff00) << 8;
6867 temp |= (opint[0] & 0x00ff0000) << 16;
6868 temp |= (opint[0] & 0xff000000) << 24;
6869
6870 return build_int_cst (long_integer_type_node, temp);
6871 }
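/* Worked example (sketch): pkwb/unpkbw move the low byte of each 16-bit
   lane to/from a packed 32-bit value, so

     __builtin_alpha_unpkbw (0x44332211)          ->  0x0044003300220011
     __builtin_alpha_pkwb   (0x0044003300220011)  ->  0x44332211

   and pklb/unpkbl do the same for two 32-bit lanes.  */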
6872
6873 static tree
6874 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6875 {
6876 unsigned HOST_WIDE_INT temp;
6877
6878 if (op_const == 0)
6879 return NULL;
6880
6881 if (opint[0] == 0)
6882 temp = 64;
6883 else
6884 temp = exact_log2 (opint[0] & -opint[0]);
6885
6886 return build_int_cst (long_integer_type_node, temp);
6887 }
6888
6889 static tree
6890 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6891 {
6892 unsigned HOST_WIDE_INT temp;
6893
6894 if (op_const == 0)
6895 return NULL;
6896
6897 if (opint[0] == 0)
6898 temp = 64;
6899 else
6900 temp = 64 - floor_log2 (opint[0]) - 1;
6901
6902 return build_int_cst (long_integer_type_node, temp);
6903 }
6904
6905 static tree
6906 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6907 {
6908 unsigned HOST_WIDE_INT temp, op;
6909
6910 if (op_const == 0)
6911 return NULL;
6912
6913 op = opint[0];
6914 temp = 0;
6915 while (op)
6916 temp++, op &= op - 1;
6917
6918 return build_int_cst (long_integer_type_node, temp);
6919 }
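/* Worked example (sketch): for a constant operand of 0xf0 the three folds
   above give cttz = 4 (trailing zero bits), ctlz = 56 (leading zero bits
   in 64), and ctpop = 4 (bits set); an operand of 0 folds to 64 for both
   zero counts and to 0 for the population count.  */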
6920
6921 /* Fold one of our builtin functions. */
6922
6923 static tree
6924 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6925 {
6926 tree op[MAX_ARGS], t;
6927 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6928 long op_const = 0, arity = 0;
6929
6930 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6931 {
6932 tree arg = TREE_VALUE (t);
6933 if (arg == error_mark_node)
6934 return NULL;
6935 if (arity >= MAX_ARGS)
6936 return NULL;
6937
6938 op[arity] = arg;
6939 opint[arity] = 0;
6940 if (TREE_CODE (arg) == INTEGER_CST)
6941 {
6942 op_const |= 1L << arity;
6943 opint[arity] = int_cst_value (arg);
6944 }
6945 }
6946
6947 switch (DECL_FUNCTION_CODE (fndecl))
6948 {
6949 case ALPHA_BUILTIN_CMPBGE:
6950 return alpha_fold_builtin_cmpbge (opint, op_const);
6951
6952 case ALPHA_BUILTIN_EXTBL:
6953 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
6954 case ALPHA_BUILTIN_EXTWL:
6955 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
6956 case ALPHA_BUILTIN_EXTLL:
6957 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
6958 case ALPHA_BUILTIN_EXTQL:
6959 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
6960 case ALPHA_BUILTIN_EXTWH:
6961 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
6962 case ALPHA_BUILTIN_EXTLH:
6963 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
6964 case ALPHA_BUILTIN_EXTQH:
6965 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
6966
6967 case ALPHA_BUILTIN_INSBL:
6968 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
6969 case ALPHA_BUILTIN_INSWL:
6970 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
6971 case ALPHA_BUILTIN_INSLL:
6972 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
6973 case ALPHA_BUILTIN_INSQL:
6974 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
6975 case ALPHA_BUILTIN_INSWH:
6976 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
6977 case ALPHA_BUILTIN_INSLH:
6978 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
6979 case ALPHA_BUILTIN_INSQH:
6980 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
6981
6982 case ALPHA_BUILTIN_MSKBL:
6983 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
6984 case ALPHA_BUILTIN_MSKWL:
6985 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
6986 case ALPHA_BUILTIN_MSKLL:
6987 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
6988 case ALPHA_BUILTIN_MSKQL:
6989 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
6990 case ALPHA_BUILTIN_MSKWH:
6991 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
6992 case ALPHA_BUILTIN_MSKLH:
6993 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
6994 case ALPHA_BUILTIN_MSKQH:
6995 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
6996
6997 case ALPHA_BUILTIN_UMULH:
6998 return alpha_fold_builtin_umulh (opint, op_const);
6999
7000 case ALPHA_BUILTIN_ZAP:
7001 opint[1] ^= 0xff;
7002 /* FALLTHRU */
7003 case ALPHA_BUILTIN_ZAPNOT:
7004 return alpha_fold_builtin_zapnot (op, opint, op_const);
7005
7006 case ALPHA_BUILTIN_MINUB8:
7007 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7008 case ALPHA_BUILTIN_MINSB8:
7009 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7010 case ALPHA_BUILTIN_MINUW4:
7011 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7012 case ALPHA_BUILTIN_MINSW4:
7013 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7014 case ALPHA_BUILTIN_MAXUB8:
7015 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7016 case ALPHA_BUILTIN_MAXSB8:
7017 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7018 case ALPHA_BUILTIN_MAXUW4:
7019 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7020 case ALPHA_BUILTIN_MAXSW4:
7021 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7022
7023 case ALPHA_BUILTIN_PERR:
7024 return alpha_fold_builtin_perr (opint, op_const);
7025 case ALPHA_BUILTIN_PKLB:
7026 return alpha_fold_builtin_pklb (opint, op_const);
7027 case ALPHA_BUILTIN_PKWB:
7028 return alpha_fold_builtin_pkwb (opint, op_const);
7029 case ALPHA_BUILTIN_UNPKBL:
7030 return alpha_fold_builtin_unpkbl (opint, op_const);
7031 case ALPHA_BUILTIN_UNPKBW:
7032 return alpha_fold_builtin_unpkbw (opint, op_const);
7033
7034 case ALPHA_BUILTIN_CTTZ:
7035 return alpha_fold_builtin_cttz (opint, op_const);
7036 case ALPHA_BUILTIN_CTLZ:
7037 return alpha_fold_builtin_ctlz (opint, op_const);
7038 case ALPHA_BUILTIN_CTPOP:
7039 return alpha_fold_builtin_ctpop (opint, op_const);
7040
7041 case ALPHA_BUILTIN_AMASK:
7042 case ALPHA_BUILTIN_IMPLVER:
7043 case ALPHA_BUILTIN_RPCC:
7044 case ALPHA_BUILTIN_THREAD_POINTER:
7045 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7046 /* None of these are foldable at compile-time. */
7047 default:
7048 return NULL;
7049 }
7050 }
7051 \f
7052 /* This page contains routines that are used to determine what the function
7053 prologue and epilogue code will do and write them out. */
7054
7055 /* Compute the size of the save area in the stack. */
7056
7057 /* These variables are used for communication between the following functions.
7058 They indicate various things about the current function being compiled
7059 that are used to tell what kind of prologue, epilogue and procedure
7060 descriptor to generate. */
7061
7062 /* Nonzero if we need a stack procedure. */
7063 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7064 static enum alpha_procedure_types alpha_procedure_type;
7065
7066 /* Register number (either FP or SP) that is used to unwind the frame. */
7067 static int vms_unwind_regno;
7068
7069 /* Register number used to save FP. We need not have one for RA since
7070 we don't modify it for register procedures. This is only defined
7071 for register frame procedures. */
7072 static int vms_save_fp_regno;
7073
7074 /* Register number used to reference objects off our PV. */
7075 static int vms_base_regno;
7076
7077 /* Compute register masks for saved registers. */
7078
7079 static void
7080 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7081 {
7082 unsigned long imask = 0;
7083 unsigned long fmask = 0;
7084 unsigned int i;
7085
7086 /* When outputting a thunk, we don't have valid register life info,
7087 but assemble_start_function wants to output .frame and .mask
7088 directives. */
7089 if (current_function_is_thunk)
7090 {
7091 *imaskP = 0;
7092 *fmaskP = 0;
7093 return;
7094 }
7095
7096 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7097 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7098
7099 /* One for every register we have to save. */
7100 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7101 if (! fixed_regs[i] && ! call_used_regs[i]
7102 && df_regs_ever_live_p (i) && i != REG_RA
7103 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7104 {
7105 if (i < 32)
7106 imask |= (1UL << i);
7107 else
7108 fmask |= (1UL << (i - 32));
7109 }
7110
7111 /* We need to restore these for the handler. */
7112 if (current_function_calls_eh_return)
7113 {
7114 for (i = 0; ; ++i)
7115 {
7116 unsigned regno = EH_RETURN_DATA_REGNO (i);
7117 if (regno == INVALID_REGNUM)
7118 break;
7119 imask |= 1UL << regno;
7120 }
7121 }
7122
7123 /* If any register spilled, then spill the return address also. */
7124 /* ??? This is required by the Digital stack unwind specification
7125 and isn't needed if we're doing Dwarf2 unwinding. */
7126 if (imask || fmask || alpha_ra_ever_killed ())
7127 imask |= (1UL << REG_RA);
7128
7129 *imaskP = imask;
7130 *fmaskP = fmask;
7131 }
7132
7133 int
7134 alpha_sa_size (void)
7135 {
7136 unsigned long mask[2];
7137 int sa_size = 0;
7138 int i, j;
7139
7140 alpha_sa_mask (&mask[0], &mask[1]);
7141
7142 if (TARGET_ABI_UNICOSMK)
7143 {
7144 if (mask[0] || mask[1])
7145 sa_size = 14;
7146 }
7147 else
7148 {
7149 for (j = 0; j < 2; ++j)
7150 for (i = 0; i < 32; ++i)
7151 if ((mask[j] >> i) & 1)
7152 sa_size++;
7153 }
7154
7155 if (TARGET_ABI_UNICOSMK)
7156 {
7157 /* We might not need to generate a frame if we don't make any calls
7158 (including calls to __T3E_MISMATCH if this is a vararg function),
7159 don't have any local variables which require stack slots, don't
7160 use alloca and have not determined that we need a frame for other
7161 reasons. */
7162
7163 alpha_procedure_type
7164 = (sa_size || get_frame_size() != 0
7165 || current_function_outgoing_args_size
7166 || current_function_stdarg || current_function_calls_alloca
7167 || frame_pointer_needed)
7168 ? PT_STACK : PT_REGISTER;
7169
7170 /* Always reserve space for saving callee-saved registers if we
7171 need a frame as required by the calling convention. */
7172 if (alpha_procedure_type == PT_STACK)
7173 sa_size = 14;
7174 }
7175 else if (TARGET_ABI_OPEN_VMS)
7176 {
7177 /* Start by assuming we can use a register procedure if we don't
7178 make any calls (REG_RA not used) or need to save any
7179 registers and a stack procedure if we do. */
7180 if ((mask[0] >> REG_RA) & 1)
7181 alpha_procedure_type = PT_STACK;
7182 else if (get_frame_size() != 0)
7183 alpha_procedure_type = PT_REGISTER;
7184 else
7185 alpha_procedure_type = PT_NULL;
7186
7187 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7188 made the final decision on stack procedure vs register procedure. */
7189 if (alpha_procedure_type == PT_STACK)
7190 sa_size -= 2;
7191
7192 /* Decide whether to refer to objects off our PV via FP or PV.
7193 If we need FP for something else or if we receive a nonlocal
7194 goto (which expects PV to contain the value), we must use PV.
7195 Otherwise, start by assuming we can use FP. */
7196
7197 vms_base_regno
7198 = (frame_pointer_needed
7199 || current_function_has_nonlocal_label
7200 || alpha_procedure_type == PT_STACK
7201 || current_function_outgoing_args_size)
7202 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7203
7204 /* If we want to copy PV into FP, we need to find some register
7205 in which to save FP. */
7206
7207 vms_save_fp_regno = -1;
7208 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7209 for (i = 0; i < 32; i++)
7210 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7211 vms_save_fp_regno = i;
7212
7213 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7214 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7215 else if (alpha_procedure_type == PT_NULL)
7216 vms_base_regno = REG_PV;
7217
7218 /* Stack unwinding should be done via FP unless we use it for PV. */
7219 vms_unwind_regno = (vms_base_regno == REG_PV
7220 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7221
7222 /* If this is a stack procedure, allow space for saving FP and RA. */
7223 if (alpha_procedure_type == PT_STACK)
7224 sa_size += 2;
7225 }
7226 else
7227 {
7228 /* Our size must be even (multiple of 16 bytes). */
7229 if (sa_size & 1)
7230 sa_size++;
7231 }
7232
7233 return sa_size * 8;
7234 }
7235
7236 /* Define the offset between two registers, one to be eliminated,
7237 and the other its replacement, at the start of a routine. */
7238
7239 HOST_WIDE_INT
7240 alpha_initial_elimination_offset (unsigned int from,
7241 unsigned int to ATTRIBUTE_UNUSED)
7242 {
7243 HOST_WIDE_INT ret;
7244
7245 ret = alpha_sa_size ();
7246 ret += ALPHA_ROUND (current_function_outgoing_args_size);
7247
7248 switch (from)
7249 {
7250 case FRAME_POINTER_REGNUM:
7251 break;
7252
7253 case ARG_POINTER_REGNUM:
7254 ret += (ALPHA_ROUND (get_frame_size ()
7255 + current_function_pretend_args_size)
7256 - current_function_pretend_args_size);
7257 break;
7258
7259 default:
7260 gcc_unreachable ();
7261 }
7262
7263 return ret;
7264 }
7265
7266 int
7267 alpha_pv_save_size (void)
7268 {
7269 alpha_sa_size ();
7270 return alpha_procedure_type == PT_STACK ? 8 : 0;
7271 }
7272
7273 int
7274 alpha_using_fp (void)
7275 {
7276 alpha_sa_size ();
7277 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7278 }
7279
7280 #if TARGET_ABI_OPEN_VMS
7281
7282 const struct attribute_spec vms_attribute_table[] =
7283 {
7284 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7285 { "overlaid", 0, 0, true, false, false, NULL },
7286 { "global", 0, 0, true, false, false, NULL },
7287 { "initialize", 0, 0, true, false, false, NULL },
7288 { NULL, 0, 0, false, false, false, NULL }
7289 };
7290
7291 #endif
7292
7293 static int
7294 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7295 {
7296 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7297 }
7298
7299 int
7300 alpha_find_lo_sum_using_gp (rtx insn)
7301 {
7302 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7303 }
7304
7305 static int
7306 alpha_does_function_need_gp (void)
7307 {
7308 rtx insn;
7309
7310 /* The GP being variable is an OSF abi thing. */
7311 if (! TARGET_ABI_OSF)
7312 return 0;
7313
7314 /* We need the gp to load the address of __mcount. */
7315 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7316 return 1;
7317
7318 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7319 if (current_function_is_thunk)
7320 return 1;
7321
7322 /* The nonlocal receiver pattern assumes that the gp is valid for
7323 the nested function. Reasonable because it's almost always set
7324 correctly already. For the cases where that's wrong, make sure
7325 the nested function loads its gp on entry. */
7326 if (current_function_has_nonlocal_goto)
7327 return 1;
7328
7329 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7330 Even if we are a static function, we still need to do this in case
7331 our address is taken and passed to something like qsort. */
7332
7333 push_topmost_sequence ();
7334 insn = get_insns ();
7335 pop_topmost_sequence ();
7336
7337 for (; insn; insn = NEXT_INSN (insn))
7338 if (INSN_P (insn)
7339 && ! JUMP_TABLE_DATA_P (insn)
7340 && GET_CODE (PATTERN (insn)) != USE
7341 && GET_CODE (PATTERN (insn)) != CLOBBER
7342 && get_attr_usegp (insn))
7343 return 1;
7344
7345 return 0;
7346 }
7347
7348 \f
7349 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7350 sequences. */
7351
7352 static rtx
7353 set_frame_related_p (void)
7354 {
7355 rtx seq = get_insns ();
7356 rtx insn;
7357
7358 end_sequence ();
7359
7360 if (!seq)
7361 return NULL_RTX;
7362
7363 if (INSN_P (seq))
7364 {
7365 insn = seq;
7366 while (insn != NULL_RTX)
7367 {
7368 RTX_FRAME_RELATED_P (insn) = 1;
7369 insn = NEXT_INSN (insn);
7370 }
7371 seq = emit_insn (seq);
7372 }
7373 else
7374 {
7375 seq = emit_insn (seq);
7376 RTX_FRAME_RELATED_P (seq) = 1;
7377 }
7378 return seq;
7379 }
7380
7381 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7382
7383 /* Generates a store with the proper unwind info attached. VALUE is
7384 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7385 contains SP+FRAME_BIAS, and that is the unwind info that should be
7386 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7387 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7388
7389 static void
7390 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7391 HOST_WIDE_INT base_ofs, rtx frame_reg)
7392 {
7393 rtx addr, mem, insn;
7394
7395 addr = plus_constant (base_reg, base_ofs);
7396 mem = gen_rtx_MEM (DImode, addr);
7397 set_mem_alias_set (mem, alpha_sr_alias_set);
7398
7399 insn = emit_move_insn (mem, value);
7400 RTX_FRAME_RELATED_P (insn) = 1;
7401
7402 if (frame_bias || value != frame_reg)
7403 {
7404 if (frame_bias)
7405 {
7406 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7407 mem = gen_rtx_MEM (DImode, addr);
7408 }
7409
7410 REG_NOTES (insn)
7411 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7412 gen_rtx_SET (VOIDmode, mem, frame_reg),
7413 REG_NOTES (insn));
7414 }
7415 }
7416
7417 static void
7418 emit_frame_store (unsigned int regno, rtx base_reg,
7419 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7420 {
7421 rtx reg = gen_rtx_REG (DImode, regno);
7422 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7423 }
7424
7425 /* Write function prologue. */
7426
7427 /* On vms we have two kinds of functions:
7428
7429 - stack frame (PROC_STACK)
7430 these are 'normal' functions with local variables which call
7431 other functions
7432 - register frame (PROC_REGISTER)
7433 keeps all data in registers, needs no stack
7434
7435 We must pass this to the assembler so it can generate the
7436 proper pdsc (procedure descriptor).
7437 This is done with the '.pdesc' command.
7438
7439 On non-VMS targets, we don't really differentiate between the two, as
7440 we can simply allocate stack without saving registers. */
7441
7442 void
7443 alpha_expand_prologue (void)
7444 {
7445 /* Registers to save. */
7446 unsigned long imask = 0;
7447 unsigned long fmask = 0;
7448 /* Stack space needed for pushing registers clobbered by us. */
7449 HOST_WIDE_INT sa_size;
7450 /* Complete stack size needed. */
7451 HOST_WIDE_INT frame_size;
7452 /* Offset from base reg to register save area. */
7453 HOST_WIDE_INT reg_offset;
7454 rtx sa_reg;
7455 int i;
7456
7457 sa_size = alpha_sa_size ();
7458
7459 frame_size = get_frame_size ();
7460 if (TARGET_ABI_OPEN_VMS)
7461 frame_size = ALPHA_ROUND (sa_size
7462 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7463 + frame_size
7464 + current_function_pretend_args_size);
7465 else if (TARGET_ABI_UNICOSMK)
7466 /* We have to allocate space for the DSIB if we generate a frame. */
7467 frame_size = ALPHA_ROUND (sa_size
7468 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7469 + ALPHA_ROUND (frame_size
7470 + current_function_outgoing_args_size);
7471 else
7472 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7473 + sa_size
7474 + ALPHA_ROUND (frame_size
7475 + current_function_pretend_args_size));
7476
7477 if (TARGET_ABI_OPEN_VMS)
7478 reg_offset = 8;
7479 else
7480 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7481
7482 alpha_sa_mask (&imask, &fmask);
7483
7484 /* Emit an insn to reload GP, if needed. */
7485 if (TARGET_ABI_OSF)
7486 {
7487 alpha_function_needs_gp = alpha_does_function_need_gp ();
7488 if (alpha_function_needs_gp)
7489 emit_insn (gen_prologue_ldgp ());
7490 }
7491
7492 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7493 the call to mcount ourselves, rather than having the linker do it
7494 magically in response to -pg. Since _mcount has special linkage,
7495 don't represent the call as a call. */
7496 if (TARGET_PROFILING_NEEDS_GP && current_function_profile)
7497 emit_insn (gen_prologue_mcount ());
7498
7499 if (TARGET_ABI_UNICOSMK)
7500 unicosmk_gen_dsib (&imask);
7501
7502 /* Adjust the stack by the frame size. If the frame size is > 4096
7503 bytes, we need to be sure we probe somewhere in the first and last
7504 4096 bytes (we can probably get away without the latter test) and
7505 every 8192 bytes in between. If the frame size is > 32768, we
7506 do this in a loop. Otherwise, we generate the explicit probe
7507 instructions.
7508
7509 Note that we are only allowed to adjust sp once in the prologue. */
7510
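/* A sketch with illustrative numbers (not tied to any real function):
   for frame_size == 20000 and sa_size == 0, the code below probes at
   sp-4096 and sp-12288 in the loop, adds one more probe at sp-20000
   because the last loop probe is more than 4096 bytes short of the
   final sp, and then adjusts sp by -20000 in a single add.  */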
7511 if (frame_size <= 32768)
7512 {
7513 if (frame_size > 4096)
7514 {
7515 int probed;
7516
7517 for (probed = 4096; probed < frame_size; probed += 8192)
7518 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7519 ? -probed + 64
7520 : -probed)));
7521
7522 /* We only have to do this probe if we aren't saving registers. */
7523 if (sa_size == 0 && frame_size > probed - 4096)
7524 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7525 }
7526
7527 if (frame_size != 0)
7528 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7529 GEN_INT (TARGET_ABI_UNICOSMK
7530 ? -frame_size + 64
7531 : -frame_size))));
7532 }
7533 else
7534 {
7535 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7536 number of 8192 byte blocks to probe. We then probe each block
7537 in the loop and then set SP to the proper location. If the
7538 amount remaining is > 4096, we have to do one more probe if we
7539 are not saving any registers. */
7540
7541 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7542 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7543 rtx ptr = gen_rtx_REG (DImode, 22);
7544 rtx count = gen_rtx_REG (DImode, 23);
7545 rtx seq;
7546
7547 emit_move_insn (count, GEN_INT (blocks));
7548 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7549 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7550
7551 /* Because of the difficulty in emitting a new basic block this
7552 late in the compilation, generate the loop as a single insn. */
7553 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7554
7555 if (leftover > 4096 && sa_size == 0)
7556 {
7557 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7558 MEM_VOLATILE_P (last) = 1;
7559 emit_move_insn (last, const0_rtx);
7560 }
7561
7562 if (TARGET_ABI_WINDOWS_NT)
7563 {
7564 /* For NT stack unwind (done by 'reverse execution'), it's
7565 not OK to take the result of a loop, even though the value
7566 is already in ptr, so we reload it via a single operation
7567 and subtract it from sp.
7568
7569 Yes, that's correct -- we have to reload the whole constant
7570 into a temporary via ldah+lda then subtract from sp. */
7571
7572 HOST_WIDE_INT lo, hi;
7573 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7574 hi = frame_size - lo;
7575
7576 emit_move_insn (ptr, GEN_INT (hi));
7577 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7578 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7579 ptr));
7580 }
7581 else
7582 {
7583 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7584 GEN_INT (-leftover)));
7585 }
7586
7587 /* This alternative is special, because the DWARF code cannot
7588 possibly intuit through the loop above. So we invent this
7589 note for it to look at instead. */
7590 RTX_FRAME_RELATED_P (seq) = 1;
7591 REG_NOTES (seq)
7592 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7593 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7594 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7595 GEN_INT (TARGET_ABI_UNICOSMK
7596 ? -frame_size + 64
7597 : -frame_size))),
7598 REG_NOTES (seq));
7599 }
7600
7601 if (!TARGET_ABI_UNICOSMK)
7602 {
7603 HOST_WIDE_INT sa_bias = 0;
7604
7605 /* Cope with very large offsets to the register save area. */
7606 sa_reg = stack_pointer_rtx;
7607 if (reg_offset + sa_size > 0x8000)
7608 {
7609 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7610 rtx sa_bias_rtx;
7611
7612 if (low + sa_size <= 0x8000)
7613 sa_bias = reg_offset - low, reg_offset = low;
7614 else
7615 sa_bias = reg_offset, reg_offset = 0;
7616
7617 sa_reg = gen_rtx_REG (DImode, 24);
7618 sa_bias_rtx = GEN_INT (sa_bias);
7619
7620 if (add_operand (sa_bias_rtx, DImode))
7621 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7622 else
7623 {
7624 emit_move_insn (sa_reg, sa_bias_rtx);
7625 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7626 }
7627 }
7628
7629 /* Save regs in stack order. Beginning with VMS PV. */
7630 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7631 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7632
7633 /* Save register RA next. */
7634 if (imask & (1UL << REG_RA))
7635 {
7636 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7637 imask &= ~(1UL << REG_RA);
7638 reg_offset += 8;
7639 }
7640
7641 /* Now save any other registers required to be saved. */
7642 for (i = 0; i < 31; i++)
7643 if (imask & (1UL << i))
7644 {
7645 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7646 reg_offset += 8;
7647 }
7648
7649 for (i = 0; i < 31; i++)
7650 if (fmask & (1UL << i))
7651 {
7652 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7653 reg_offset += 8;
7654 }
7655 }
7656 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7657 {
7658 /* The standard frame on the T3E includes space for saving registers.
7659 We just have to use it. We don't have to save the return address and
7660 the old frame pointer here - they are saved in the DSIB. */
7661
7662 reg_offset = -56;
7663 for (i = 9; i < 15; i++)
7664 if (imask & (1UL << i))
7665 {
7666 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7667 reg_offset -= 8;
7668 }
7669 for (i = 2; i < 10; i++)
7670 if (fmask & (1UL << i))
7671 {
7672 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7673 reg_offset -= 8;
7674 }
7675 }
7676
7677 if (TARGET_ABI_OPEN_VMS)
7678 {
7679 if (alpha_procedure_type == PT_REGISTER)
7680 /* Register frame procedures save the fp.
7681 ?? Ought to have a dwarf2 save for this. */
7682 emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7683 hard_frame_pointer_rtx);
7684
7685 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7686 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7687 gen_rtx_REG (DImode, REG_PV)));
7688
7689 if (alpha_procedure_type != PT_NULL
7690 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7691 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7692
7693 /* If we have to allocate space for outgoing args, do it now. */
7694 if (current_function_outgoing_args_size != 0)
7695 {
7696 rtx seq
7697 = emit_move_insn (stack_pointer_rtx,
7698 plus_constant
7699 (hard_frame_pointer_rtx,
7700 - (ALPHA_ROUND
7701 (current_function_outgoing_args_size))));
7702
7703 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7704 if ! frame_pointer_needed. Setting the bit will change the CFA
7705 computation rule to use sp again, which would be wrong if we had
7706 frame_pointer_needed, as this means sp might move unpredictably
7707 later on.
7708
7709 Also, note that
7710 frame_pointer_needed
7711 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7712 and
7713 current_function_outgoing_args_size != 0
7714 => alpha_procedure_type != PT_NULL,
7715
7716 so when we are not setting the bit here, we are guaranteed to
7717 have emitted an FRP frame pointer update just before. */
7718 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7719 }
7720 }
7721 else if (!TARGET_ABI_UNICOSMK)
7722 {
7723 /* If we need a frame pointer, set it from the stack pointer. */
7724 if (frame_pointer_needed)
7725 {
7726 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7727 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7728 else
7729 /* This must always be the last instruction in the
7730 prologue, thus we emit a special move + clobber. */
7731 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7732 stack_pointer_rtx, sa_reg)));
7733 }
7734 }
7735
7736 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7737 the prologue, for exception handling reasons, we cannot do this for
7738 any insn that might fault. We could prevent this for mems with a
7739 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7740 have to prevent all such scheduling with a blockage.
7741
7742 Linux, on the other hand, never bothered to implement OSF/1's
7743 exception handling, and so doesn't care about such things. Anyone
7744 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7745
7746 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7747 emit_insn (gen_blockage ());
7748 }
7749
7750 /* Count the number of .file directives, so that .loc is up to date. */
7751 int num_source_filenames = 0;
7752
7753 /* Output the textual info surrounding the prologue. */
7754
7755 void
7756 alpha_start_function (FILE *file, const char *fnname,
7757 tree decl ATTRIBUTE_UNUSED)
7758 {
7759 unsigned long imask = 0;
7760 unsigned long fmask = 0;
7761 /* Stack space needed for pushing registers clobbered by us. */
7762 HOST_WIDE_INT sa_size;
7763 /* Complete stack size needed. */
7764 unsigned HOST_WIDE_INT frame_size;
7765 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7766 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7767 ? 524288
7768 : 1UL << 31;
7769 /* Offset from base reg to register save area. */
7770 HOST_WIDE_INT reg_offset;
7771 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7772 int i;
7773
7774 /* Don't emit an extern directive for functions defined in the same file. */
7775 if (TARGET_ABI_UNICOSMK)
7776 {
7777 tree name_tree;
7778 name_tree = get_identifier (fnname);
7779 TREE_ASM_WRITTEN (name_tree) = 1;
7780 }
7781
7782 alpha_fnname = fnname;
7783 sa_size = alpha_sa_size ();
7784
7785 frame_size = get_frame_size ();
7786 if (TARGET_ABI_OPEN_VMS)
7787 frame_size = ALPHA_ROUND (sa_size
7788 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7789 + frame_size
7790 + current_function_pretend_args_size);
7791 else if (TARGET_ABI_UNICOSMK)
7792 frame_size = ALPHA_ROUND (sa_size
7793 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7794 + ALPHA_ROUND (frame_size
7795 + current_function_outgoing_args_size);
7796 else
7797 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7798 + sa_size
7799 + ALPHA_ROUND (frame_size
7800 + current_function_pretend_args_size));
7801
7802 if (TARGET_ABI_OPEN_VMS)
7803 reg_offset = 8;
7804 else
7805 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
7806
7807 alpha_sa_mask (&imask, &fmask);
7808
7809 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7810 We have to do that before the .ent directive as we cannot switch
7811 files within procedures with native ecoff because line numbers are
7812 linked to procedure descriptors.
7813 Outputting the lineno helps debugging of one line functions as they
7814 would otherwise get no line number at all. Please note that we would
7815 like to put out last_linenum from final.c, but it is not accessible. */
7816
7817 if (write_symbols == SDB_DEBUG)
7818 {
7819 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7820 ASM_OUTPUT_SOURCE_FILENAME (file,
7821 DECL_SOURCE_FILE (current_function_decl));
7822 #endif
7823 #ifdef SDB_OUTPUT_SOURCE_LINE
7824 if (debug_info_level != DINFO_LEVEL_TERSE)
7825 SDB_OUTPUT_SOURCE_LINE (file,
7826 DECL_SOURCE_LINE (current_function_decl));
7827 #endif
7828 }
7829
7830 /* Issue function start and label. */
7831 if (TARGET_ABI_OPEN_VMS
7832 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7833 {
7834 fputs ("\t.ent ", file);
7835 assemble_name (file, fnname);
7836 putc ('\n', file);
7837
7838 /* If the function needs GP, we'll write the "..ng" label there.
7839 Otherwise, do it here. */
7840 if (TARGET_ABI_OSF
7841 && ! alpha_function_needs_gp
7842 && ! current_function_is_thunk)
7843 {
7844 putc ('$', file);
7845 assemble_name (file, fnname);
7846 fputs ("..ng:\n", file);
7847 }
7848 }
7849
7850 strcpy (entry_label, fnname);
7851 if (TARGET_ABI_OPEN_VMS)
7852 strcat (entry_label, "..en");
7853
7854 /* For public functions, the label must be globalized by appending an
7855 additional colon. */
7856 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7857 strcat (entry_label, ":");
7858
7859 ASM_OUTPUT_LABEL (file, entry_label);
7860 inside_function = TRUE;
7861
7862 if (TARGET_ABI_OPEN_VMS)
7863 fprintf (file, "\t.base $%d\n", vms_base_regno);
7864
7865 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7866 && !flag_inhibit_size_directive)
7867 {
7868 /* Set flags in procedure descriptor to request IEEE-conformant
7869 math-library routines. The value we set it to is PDSC_EXC_IEEE
7870 (/usr/include/pdsc.h). */
7871 fputs ("\t.eflag 48\n", file);
7872 }
7873
7874 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7875 alpha_auto_offset = -frame_size + current_function_pretend_args_size;
7876 alpha_arg_offset = -frame_size + 48;
7877
7878 /* Describe our frame. If the frame size is larger than an integer,
7879 print it as zero to avoid an assembler error. We won't be
7880 properly describing such a frame, but that's the best we can do. */
7881 if (TARGET_ABI_UNICOSMK)
7882 ;
7883 else if (TARGET_ABI_OPEN_VMS)
7884 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7885 HOST_WIDE_INT_PRINT_DEC "\n",
7886 vms_unwind_regno,
7887 frame_size >= (1UL << 31) ? 0 : frame_size,
7888 reg_offset);
7889 else if (!flag_inhibit_size_directive)
7890 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7891 (frame_pointer_needed
7892 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7893 frame_size >= max_frame_size ? 0 : frame_size,
7894 current_function_pretend_args_size);
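  /* Illustrative only: for a typical OSF function this line reads e.g.
     "\t.frame $30,96,$26,0" -- the frame register, the rounded frame size,
     the return-address register $26, and the pretend-args size computed
     above.  */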
7895
7896 /* Describe which registers were spilled. */
7897 if (TARGET_ABI_UNICOSMK)
7898 ;
7899 else if (TARGET_ABI_OPEN_VMS)
7900 {
7901 if (imask)
7902 /* ??? Does VMS care if mask contains ra? The old code didn't
7903 set it, so I don't here. */
7904 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7905 if (fmask)
7906 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7907 if (alpha_procedure_type == PT_REGISTER)
7908 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7909 }
7910 else if (!flag_inhibit_size_directive)
7911 {
7912 if (imask)
7913 {
7914 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7915 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7916
7917 for (i = 0; i < 32; ++i)
7918 if (imask & (1UL << i))
7919 reg_offset += 8;
7920 }
7921
7922 if (fmask)
7923 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7924 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7925 }
7926
7927 #if TARGET_ABI_OPEN_VMS
7928 /* Ifdef'ed because link_section is only available then. */
7929 switch_to_section (readonly_data_section);
7930 fprintf (file, "\t.align 3\n");
7931 assemble_name (file, fnname); fputs ("..na:\n", file);
7932 fputs ("\t.ascii \"", file);
7933 assemble_name (file, fnname);
7934 fputs ("\\0\"\n", file);
7935 alpha_need_linkage (fnname, 1);
7936 switch_to_section (text_section);
7937 #endif
7938 }
7939
7940 /* Emit the .prologue note at the scheduled end of the prologue. */
7941
7942 static void
7943 alpha_output_function_end_prologue (FILE *file)
7944 {
7945 if (TARGET_ABI_UNICOSMK)
7946 ;
7947 else if (TARGET_ABI_OPEN_VMS)
7948 fputs ("\t.prologue\n", file);
7949 else if (TARGET_ABI_WINDOWS_NT)
7950 fputs ("\t.prologue 0\n", file);
7951 else if (!flag_inhibit_size_directive)
7952 fprintf (file, "\t.prologue %d\n",
7953 alpha_function_needs_gp || current_function_is_thunk);
7954 }
7955
7956 /* Write function epilogue. */
7957
7958 /* ??? At some point we will want to support full unwind, and so will
7959 need to mark the epilogue as well. At the moment, we just confuse
7960 dwarf2out. */
7961 #undef FRP
7962 #define FRP(exp) exp
7963
7964 void
7965 alpha_expand_epilogue (void)
7966 {
7967 /* Registers to save. */
7968 unsigned long imask = 0;
7969 unsigned long fmask = 0;
7970 /* Stack space needed for pushing registers clobbered by us. */
7971 HOST_WIDE_INT sa_size;
7972 /* Complete stack size needed. */
7973 HOST_WIDE_INT frame_size;
7974 /* Offset from base reg to register save area. */
7975 HOST_WIDE_INT reg_offset;
7976 int fp_is_frame_pointer, fp_offset;
7977 rtx sa_reg, sa_reg_exp = NULL;
7978 rtx sp_adj1, sp_adj2, mem;
7979 rtx eh_ofs;
7980 int i;
7981
7982 sa_size = alpha_sa_size ();
7983
7984 frame_size = get_frame_size ();
7985 if (TARGET_ABI_OPEN_VMS)
7986 frame_size = ALPHA_ROUND (sa_size
7987 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7988 + frame_size
7989 + current_function_pretend_args_size);
7990 else if (TARGET_ABI_UNICOSMK)
7991 frame_size = ALPHA_ROUND (sa_size
7992 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7993 + ALPHA_ROUND (frame_size
7994 + current_function_outgoing_args_size);
7995 else
7996 frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
7997 + sa_size
7998 + ALPHA_ROUND (frame_size
7999 + current_function_pretend_args_size));
8000
8001 if (TARGET_ABI_OPEN_VMS)
8002 {
8003 if (alpha_procedure_type == PT_STACK)
8004 reg_offset = 8;
8005 else
8006 reg_offset = 0;
8007 }
8008 else
8009 reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
8010
8011 alpha_sa_mask (&imask, &fmask);
8012
8013 fp_is_frame_pointer
8014 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8015 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8016 fp_offset = 0;
8017 sa_reg = stack_pointer_rtx;
8018
8019 if (current_function_calls_eh_return)
8020 eh_ofs = EH_RETURN_STACKADJ_RTX;
8021 else
8022 eh_ofs = NULL_RTX;
8023
8024 if (!TARGET_ABI_UNICOSMK && sa_size)
8025 {
8026 /* If we have a frame pointer, restore SP from it. */
8027 if ((TARGET_ABI_OPEN_VMS
8028 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8029 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8030 FRP (emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx));
8031
8032 /* Cope with very large offsets to the register save area. */
8033 if (reg_offset + sa_size > 0x8000)
8034 {
8035 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8036 HOST_WIDE_INT bias;
8037
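      /* ((X & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low 16 bits of X,
	 so reg_offset == bias + low with LOW fitting a signed 16-bit
	 load/store displacement; e.g. 0x18000 splits into bias 0x20000 and
	 low -0x8000.  If even that cannot reach the whole save area, the
	 entire offset goes into the bias instead.  */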
8038 if (low + sa_size <= 0x8000)
8039 bias = reg_offset - low, reg_offset = low;
8040 else
8041 bias = reg_offset, reg_offset = 0;
8042
8043 sa_reg = gen_rtx_REG (DImode, 22);
8044 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8045
8046 FRP (emit_move_insn (sa_reg, sa_reg_exp));
8047 }
8048
8049 /* Restore registers in order, excepting a true frame pointer. */
8050
8051 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8052 if (! eh_ofs)
8053 set_mem_alias_set (mem, alpha_sr_alias_set);
8054 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8055
8056 reg_offset += 8;
8057 imask &= ~(1UL << REG_RA);
8058
8059 for (i = 0; i < 31; ++i)
8060 if (imask & (1UL << i))
8061 {
8062 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8063 fp_offset = reg_offset;
8064 else
8065 {
8066 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8067 set_mem_alias_set (mem, alpha_sr_alias_set);
8068 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8069 }
8070 reg_offset += 8;
8071 }
8072
8073 for (i = 0; i < 31; ++i)
8074 if (fmask & (1UL << i))
8075 {
8076 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8077 set_mem_alias_set (mem, alpha_sr_alias_set);
8078 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8079 reg_offset += 8;
8080 }
8081 }
8082 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8083 {
8084 /* Restore callee-saved general-purpose registers. */
8085
8086 reg_offset = -56;
8087
8088 for (i = 9; i < 15; i++)
8089 if (imask & (1UL << i))
8090 {
8091 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8092 reg_offset));
8093 set_mem_alias_set (mem, alpha_sr_alias_set);
8094 FRP (emit_move_insn (gen_rtx_REG (DImode, i), mem));
8095 reg_offset -= 8;
8096 }
8097
8098 for (i = 2; i < 10; i++)
8099 if (fmask & (1UL << i))
8100 {
8101 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8102 reg_offset));
8103 set_mem_alias_set (mem, alpha_sr_alias_set);
8104 FRP (emit_move_insn (gen_rtx_REG (DFmode, i+32), mem));
8105 reg_offset -= 8;
8106 }
8107
8108 /* Restore the return address from the DSIB. */
8109
8110 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx, -8));
8111 set_mem_alias_set (mem, alpha_sr_alias_set);
8112 FRP (emit_move_insn (gen_rtx_REG (DImode, REG_RA), mem));
8113 }
8114
8115 if (frame_size || eh_ofs)
8116 {
8117 sp_adj1 = stack_pointer_rtx;
8118
8119 if (eh_ofs)
8120 {
8121 sp_adj1 = gen_rtx_REG (DImode, 23);
8122 emit_move_insn (sp_adj1,
8123 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8124 }
8125
8126 /* If the stack size is large, begin computation into a temporary
8127 register so as not to interfere with a potential fp restore,
8128 which must be consecutive with an SP restore. */
8129 if (frame_size < 32768
8130 && ! (TARGET_ABI_UNICOSMK && current_function_calls_alloca))
8131 sp_adj2 = GEN_INT (frame_size);
8132 else if (TARGET_ABI_UNICOSMK)
8133 {
8134 sp_adj1 = gen_rtx_REG (DImode, 23);
8135 FRP (emit_move_insn (sp_adj1, hard_frame_pointer_rtx));
8136 sp_adj2 = const0_rtx;
8137 }
8138 else if (frame_size < 0x40007fffL)
8139 {
8140 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8141
8142 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8143 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8144 sp_adj1 = sa_reg;
8145 else
8146 {
8147 sp_adj1 = gen_rtx_REG (DImode, 23);
8148 FRP (emit_move_insn (sp_adj1, sp_adj2));
8149 }
8150 sp_adj2 = GEN_INT (low);
8151 }
8152 else
8153 {
8154 rtx tmp = gen_rtx_REG (DImode, 23);
8155 FRP (sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size,
8156 3, false));
8157 if (!sp_adj2)
8158 {
8159 /* We can't drop new things to memory this late, afaik,
8160 so build it up by pieces. */
8161 FRP (sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8162 -(frame_size < 0)));
8163 gcc_assert (sp_adj2);
8164 }
8165 }
8166
8167 /* From now on, things must be in order. So emit blockages. */
8168
8169 /* Restore the frame pointer. */
8170 if (TARGET_ABI_UNICOSMK)
8171 {
8172 emit_insn (gen_blockage ());
8173 mem = gen_rtx_MEM (DImode,
8174 plus_constant (hard_frame_pointer_rtx, -16));
8175 set_mem_alias_set (mem, alpha_sr_alias_set);
8176 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8177 }
8178 else if (fp_is_frame_pointer)
8179 {
8180 emit_insn (gen_blockage ());
8181 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8182 set_mem_alias_set (mem, alpha_sr_alias_set);
8183 FRP (emit_move_insn (hard_frame_pointer_rtx, mem));
8184 }
8185 else if (TARGET_ABI_OPEN_VMS)
8186 {
8187 emit_insn (gen_blockage ());
8188 FRP (emit_move_insn (hard_frame_pointer_rtx,
8189 gen_rtx_REG (DImode, vms_save_fp_regno)));
8190 }
8191
8192 /* Restore the stack pointer. */
8193 emit_insn (gen_blockage ());
8194 if (sp_adj2 == const0_rtx)
8195 FRP (emit_move_insn (stack_pointer_rtx, sp_adj1));
8196 else
8197 FRP (emit_move_insn (stack_pointer_rtx,
8198 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)));
8199 }
8200 else
8201 {
8202 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8203 {
8204 emit_insn (gen_blockage ());
8205 FRP (emit_move_insn (hard_frame_pointer_rtx,
8206 gen_rtx_REG (DImode, vms_save_fp_regno)));
8207 }
8208 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8209 {
8210 /* Decrement the frame pointer if the function does not have a
8211 frame. */
8212
8213 emit_insn (gen_blockage ());
8214 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8215 hard_frame_pointer_rtx, constm1_rtx)));
8216 }
8217 }
8218 }
8219 \f
8220 /* Output the rest of the textual info surrounding the epilogue. */
8221
8222 void
8223 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8224 {
8225 rtx insn;
8226
8227 /* We output a nop after noreturn calls at the very end of the function to
8228 ensure that the return address always remains in the caller's code range,
8229 as not doing so might confuse unwinding engines. */
8230 insn = get_last_insn ();
8231 if (!INSN_P (insn))
8232 insn = prev_active_insn (insn);
8233 if (GET_CODE (insn) == CALL_INSN)
8234 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8235
8236 #if TARGET_ABI_OPEN_VMS
8237 alpha_write_linkage (file, fnname, decl);
8238 #endif
8239
8240 /* End the function. */
8241 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8242 {
8243 fputs ("\t.end ", file);
8244 assemble_name (file, fnname);
8245 putc ('\n', file);
8246 }
8247 inside_function = FALSE;
8248
8249 /* Output jump tables and the static subroutine information block. */
8250 if (TARGET_ABI_UNICOSMK)
8251 {
8252 unicosmk_output_ssib (file, fnname);
8253 unicosmk_output_deferred_case_vectors (file);
8254 }
8255 }
8256
8257 #if TARGET_ABI_OSF
8258 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8259
8260 In order to avoid the hordes of differences between generated code
8261 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8262 lots of code loading up large constants, generate rtl and emit it
8263 instead of going straight to text.
8264
8265 Not sure why this idea hasn't been explored before... */
8266
8267 static void
8268 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8269 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8270 tree function)
8271 {
8272 HOST_WIDE_INT hi, lo;
8273 rtx this, insn, funexp;
8274
8275 /* We always require a valid GP. */
8276 emit_insn (gen_prologue_ldgp ());
8277 emit_note (NOTE_INSN_PROLOGUE_END);
8278
8279 /* Find the "this" pointer. If the function returns a structure,
8280 the structure return pointer is in $16. */
8281 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8282 this = gen_rtx_REG (Pmode, 17);
8283 else
8284 this = gen_rtx_REG (Pmode, 16);
8285
8286 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8287 entire constant for the add. */
8288 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8289 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
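  /* HI is DELTA rounded to a signed multiple of 0x10000, LO the signed
     16-bit remainder.  When HI + LO reconstructs DELTA exactly (i.e. DELTA
     fits in 32 bits), the two adds below become an ldah/lda pair; otherwise
     the full constant is materialized first.  */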
8290 if (hi + lo == delta)
8291 {
8292 if (hi)
8293 emit_insn (gen_adddi3 (this, this, GEN_INT (hi)));
8294 if (lo)
8295 emit_insn (gen_adddi3 (this, this, GEN_INT (lo)));
8296 }
8297 else
8298 {
8299 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8300 delta, -(delta < 0));
8301 emit_insn (gen_adddi3 (this, this, tmp));
8302 }
8303
8304 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8305 if (vcall_offset)
8306 {
8307 rtx tmp, tmp2;
8308
8309 tmp = gen_rtx_REG (Pmode, 0);
8310 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8311
8312 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8313 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8314 if (hi + lo == vcall_offset)
8315 {
8316 if (hi)
8317 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8318 }
8319 else
8320 {
8321 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8322 vcall_offset, -(vcall_offset < 0));
8323 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8324 lo = 0;
8325 }
8326 if (lo)
8327 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8328 else
8329 tmp2 = tmp;
8330 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8331
8332 emit_insn (gen_adddi3 (this, this, tmp));
8333 }
8334
8335 /* Generate a tail call to the target function. */
8336 if (! TREE_USED (function))
8337 {
8338 assemble_external (function);
8339 TREE_USED (function) = 1;
8340 }
8341 funexp = XEXP (DECL_RTL (function), 0);
8342 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8343 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8344 SIBLING_CALL_P (insn) = 1;
8345
8346 /* Run just enough of rest_of_compilation to get the insns emitted.
8347 There's not really enough bulk here to make other passes such as
8348 instruction scheduling worth while. Note that use_thunk calls
8349 assemble_start_function and assemble_end_function. */
8350 insn = get_insns ();
8351 insn_locators_alloc ();
8352 shorten_branches (insn);
8353 final_start_function (insn, file, 1);
8354 final (insn, file, 1);
8355 final_end_function ();
8356 }
8357 #endif /* TARGET_ABI_OSF */
8358 \f
8359 /* Debugging support. */
8360
8361 #include "gstab.h"
8362
8363 /* Count the number of sdb-related labels generated (to find block
8364 start and end boundaries). */
8365
8366 int sdb_label_count = 0;
8367
8368 /* Name of the file containing the current function. */
8369
8370 static const char *current_function_file = "";
8371
8372 /* Offsets to alpha virtual arg/local debugging pointers. */
8373
8374 long alpha_arg_offset;
8375 long alpha_auto_offset;
8376 \f
8377 /* Emit a new filename to a stream. */
8378
8379 void
8380 alpha_output_filename (FILE *stream, const char *name)
8381 {
8382 static int first_time = TRUE;
8383
8384 if (first_time)
8385 {
8386 first_time = FALSE;
8387 ++num_source_filenames;
8388 current_function_file = name;
8389 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8390 output_quoted_string (stream, name);
8391 fprintf (stream, "\n");
8392 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8393 fprintf (stream, "\t#@stabs\n");
8394 }
8395
8396 else if (write_symbols == DBX_DEBUG)
8397 /* dbxout.c will emit an appropriate .stabs directive. */
8398 return;
8399
8400 else if (name != current_function_file
8401 && strcmp (name, current_function_file) != 0)
8402 {
8403 if (inside_function && ! TARGET_GAS)
8404 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8405 else
8406 {
8407 ++num_source_filenames;
8408 current_function_file = name;
8409 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8410 }
8411
8412 output_quoted_string (stream, name);
8413 fprintf (stream, "\n");
8414 }
8415 }
8416 \f
8417 /* Structure to show the current status of registers and memory. */
8418
8419 struct shadow_summary
8420 {
8421 struct {
8422 unsigned int i : 31; /* Mask of int regs */
8423 unsigned int fp : 31; /* Mask of fp regs */
8424 unsigned int mem : 1; /* mem == imem | fpmem */
8425 } used, defd;
8426 };
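/* Each mask has only 31 bits because $31 and $f31 always read as zero
   and are never tracked; see the REG case in summarize_insn.  */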
8427
8428 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8429 to the summary structure. SET is nonzero if the insn is setting the
8430 object, otherwise zero. */
8431
8432 static void
8433 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8434 {
8435 const char *format_ptr;
8436 int i, j;
8437
8438 if (x == 0)
8439 return;
8440
8441 switch (GET_CODE (x))
8442 {
8443 /* ??? Note that this case would be incorrect if the Alpha had a
8444 ZERO_EXTRACT in SET_DEST. */
8445 case SET:
8446 summarize_insn (SET_SRC (x), sum, 0);
8447 summarize_insn (SET_DEST (x), sum, 1);
8448 break;
8449
8450 case CLOBBER:
8451 summarize_insn (XEXP (x, 0), sum, 1);
8452 break;
8453
8454 case USE:
8455 summarize_insn (XEXP (x, 0), sum, 0);
8456 break;
8457
8458 case ASM_OPERANDS:
8459 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8460 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8461 break;
8462
8463 case PARALLEL:
8464 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8465 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8466 break;
8467
8468 case SUBREG:
8469 summarize_insn (SUBREG_REG (x), sum, 0);
8470 break;
8471
8472 case REG:
8473 {
8474 int regno = REGNO (x);
8475 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8476
8477 if (regno == 31 || regno == 63)
8478 break;
8479
8480 if (set)
8481 {
8482 if (regno < 32)
8483 sum->defd.i |= mask;
8484 else
8485 sum->defd.fp |= mask;
8486 }
8487 else
8488 {
8489 if (regno < 32)
8490 sum->used.i |= mask;
8491 else
8492 sum->used.fp |= mask;
8493 }
8494 }
8495 break;
8496
8497 case MEM:
8498 if (set)
8499 sum->defd.mem = 1;
8500 else
8501 sum->used.mem = 1;
8502
8503 /* Find the regs used in memory address computation: */
8504 summarize_insn (XEXP (x, 0), sum, 0);
8505 break;
8506
8507 case CONST_INT: case CONST_DOUBLE:
8508 case SYMBOL_REF: case LABEL_REF: case CONST:
8509 case SCRATCH: case ASM_INPUT:
8510 break;
8511
8512 /* Handle common unary and binary ops for efficiency. */
8513 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8514 case MOD: case UDIV: case UMOD: case AND: case IOR:
8515 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8516 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8517 case NE: case EQ: case GE: case GT: case LE:
8518 case LT: case GEU: case GTU: case LEU: case LTU:
8519 summarize_insn (XEXP (x, 0), sum, 0);
8520 summarize_insn (XEXP (x, 1), sum, 0);
8521 break;
8522
8523 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8524 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8525 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8526 case SQRT: case FFS:
8527 summarize_insn (XEXP (x, 0), sum, 0);
8528 break;
8529
8530 default:
8531 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8532 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8533 switch (format_ptr[i])
8534 {
8535 case 'e':
8536 summarize_insn (XEXP (x, i), sum, 0);
8537 break;
8538
8539 case 'E':
8540 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8541 summarize_insn (XVECEXP (x, i, j), sum, 0);
8542 break;
8543
8544 case 'i':
8545 break;
8546
8547 default:
8548 gcc_unreachable ();
8549 }
8550 }
8551 }
8552
8553 /* Ensure a sufficient number of `trapb' insns are in the code when
8554 the user requests code with a trap precision of functions or
8555 instructions.
8556
8557 In naive mode, when the user requests a trap-precision of
8558 "instruction", a trapb is needed after every instruction that may
8559 generate a trap. This ensures that the code is resumption safe but
8560 it is also slow.
8561
8562 When optimizations are turned on, we delay issuing a trapb as long
8563 as possible. In this context, a trap shadow is the sequence of
8564 instructions that starts with a (potentially) trap generating
8565 instruction and extends to the next trapb or call_pal instruction
8566 (but GCC never generates call_pal by itself). We can delay (and
8567 therefore sometimes omit) a trapb subject to the following
8568 conditions:
8569
8570 (a) On entry to the trap shadow, if any Alpha register or memory
8571 location contains a value that is used as an operand value by some
8572 instruction in the trap shadow (live on entry), then no instruction
8573 in the trap shadow may modify the register or memory location.
8574
8575 (b) Within the trap shadow, the computation of the base register
8576 for a memory load or store instruction may not involve using the
8577 result of an instruction that might generate an UNPREDICTABLE
8578 result.
8579
8580 (c) Within the trap shadow, no register may be used more than once
8581 as a destination register. (This is to make life easier for the
8582 trap-handler.)
8583
8584 (d) The trap shadow may not include any branch instructions. */
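/* For illustration only (this pass does not emit such code verbatim):
   with -mtrap-precision=i and optimization, a shadow such as

	addt $f1,$f2,$f10	# may trap; opens the shadow
	addt $f3,$f4,$f11	# new destination, operands live on entry
	trapb			# closes the shadow

   satisfies (a)-(d), whereas reusing $f10 as a destination before the
   trapb would violate (c) and force the trapb to be emitted earlier.  */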
8585
8586 static void
8587 alpha_handle_trap_shadows (void)
8588 {
8589 struct shadow_summary shadow;
8590 int trap_pending, exception_nesting;
8591 rtx i, n;
8592
8593 trap_pending = 0;
8594 exception_nesting = 0;
8595 shadow.used.i = 0;
8596 shadow.used.fp = 0;
8597 shadow.used.mem = 0;
8598 shadow.defd = shadow.used;
8599
8600 for (i = get_insns (); i ; i = NEXT_INSN (i))
8601 {
8602 if (GET_CODE (i) == NOTE)
8603 {
8604 switch (NOTE_KIND (i))
8605 {
8606 case NOTE_INSN_EH_REGION_BEG:
8607 exception_nesting++;
8608 if (trap_pending)
8609 goto close_shadow;
8610 break;
8611
8612 case NOTE_INSN_EH_REGION_END:
8613 exception_nesting--;
8614 if (trap_pending)
8615 goto close_shadow;
8616 break;
8617
8618 case NOTE_INSN_EPILOGUE_BEG:
8619 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8620 goto close_shadow;
8621 break;
8622 }
8623 }
8624 else if (trap_pending)
8625 {
8626 if (alpha_tp == ALPHA_TP_FUNC)
8627 {
8628 if (GET_CODE (i) == JUMP_INSN
8629 && GET_CODE (PATTERN (i)) == RETURN)
8630 goto close_shadow;
8631 }
8632 else if (alpha_tp == ALPHA_TP_INSN)
8633 {
8634 if (optimize > 0)
8635 {
8636 struct shadow_summary sum;
8637
8638 sum.used.i = 0;
8639 sum.used.fp = 0;
8640 sum.used.mem = 0;
8641 sum.defd = sum.used;
8642
8643 switch (GET_CODE (i))
8644 {
8645 case INSN:
8646 /* Annoyingly, get_attr_trap will die on these. */
8647 if (GET_CODE (PATTERN (i)) == USE
8648 || GET_CODE (PATTERN (i)) == CLOBBER)
8649 break;
8650
8651 summarize_insn (PATTERN (i), &sum, 0);
8652
8653 if ((sum.defd.i & shadow.defd.i)
8654 || (sum.defd.fp & shadow.defd.fp))
8655 {
8656 /* (c) would be violated */
8657 goto close_shadow;
8658 }
8659
8660 /* Combine shadow with summary of current insn: */
8661 shadow.used.i |= sum.used.i;
8662 shadow.used.fp |= sum.used.fp;
8663 shadow.used.mem |= sum.used.mem;
8664 shadow.defd.i |= sum.defd.i;
8665 shadow.defd.fp |= sum.defd.fp;
8666 shadow.defd.mem |= sum.defd.mem;
8667
8668 if ((sum.defd.i & shadow.used.i)
8669 || (sum.defd.fp & shadow.used.fp)
8670 || (sum.defd.mem & shadow.used.mem))
8671 {
8672 /* (a) would be violated (also takes care of (b)) */
8673 gcc_assert (get_attr_trap (i) != TRAP_YES
8674 || (!(sum.defd.i & sum.used.i)
8675 && !(sum.defd.fp & sum.used.fp)));
8676
8677 goto close_shadow;
8678 }
8679 break;
8680
8681 case JUMP_INSN:
8682 case CALL_INSN:
8683 case CODE_LABEL:
8684 goto close_shadow;
8685
8686 default:
8687 gcc_unreachable ();
8688 }
8689 }
8690 else
8691 {
8692 close_shadow:
8693 n = emit_insn_before (gen_trapb (), i);
8694 PUT_MODE (n, TImode);
8695 PUT_MODE (i, TImode);
8696 trap_pending = 0;
8697 shadow.used.i = 0;
8698 shadow.used.fp = 0;
8699 shadow.used.mem = 0;
8700 shadow.defd = shadow.used;
8701 }
8702 }
8703 }
8704
8705 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8706 && GET_CODE (i) == INSN
8707 && GET_CODE (PATTERN (i)) != USE
8708 && GET_CODE (PATTERN (i)) != CLOBBER
8709 && get_attr_trap (i) == TRAP_YES)
8710 {
8711 if (optimize && !trap_pending)
8712 summarize_insn (PATTERN (i), &shadow, 0);
8713 trap_pending = 1;
8714 }
8715 }
8716 }
8717 \f
8718 /* Alpha can only issue instruction groups simultaneously if they are
8719 suitably aligned. This is very processor-specific. */
8720 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8721 that are marked "fake". These instructions do not exist on that target,
8722 but it is possible to see these insns with deranged combinations of
8723 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8724 choose a result at random. */
8725
8726 enum alphaev4_pipe {
8727 EV4_STOP = 0,
8728 EV4_IB0 = 1,
8729 EV4_IB1 = 2,
8730 EV4_IBX = 4
8731 };
8732
8733 enum alphaev5_pipe {
8734 EV5_STOP = 0,
8735 EV5_NONE = 1,
8736 EV5_E01 = 2,
8737 EV5_E0 = 4,
8738 EV5_E1 = 8,
8739 EV5_FAM = 16,
8740 EV5_FA = 32,
8741 EV5_FM = 64
8742 };
8743
8744 static enum alphaev4_pipe
8745 alphaev4_insn_pipe (rtx insn)
8746 {
8747 if (recog_memoized (insn) < 0)
8748 return EV4_STOP;
8749 if (get_attr_length (insn) != 4)
8750 return EV4_STOP;
8751
8752 switch (get_attr_type (insn))
8753 {
8754 case TYPE_ILD:
8755 case TYPE_LDSYM:
8756 case TYPE_FLD:
8757 case TYPE_LD_L:
8758 return EV4_IBX;
8759
8760 case TYPE_IADD:
8761 case TYPE_ILOG:
8762 case TYPE_ICMOV:
8763 case TYPE_ICMP:
8764 case TYPE_FST:
8765 case TYPE_SHIFT:
8766 case TYPE_IMUL:
8767 case TYPE_FBR:
8768 case TYPE_MVI: /* fake */
8769 return EV4_IB0;
8770
8771 case TYPE_IST:
8772 case TYPE_MISC:
8773 case TYPE_IBR:
8774 case TYPE_JSR:
8775 case TYPE_CALLPAL:
8776 case TYPE_FCPYS:
8777 case TYPE_FCMOV:
8778 case TYPE_FADD:
8779 case TYPE_FDIV:
8780 case TYPE_FMUL:
8781 case TYPE_ST_C:
8782 case TYPE_MB:
8783 case TYPE_FSQRT: /* fake */
8784 case TYPE_FTOI: /* fake */
8785 case TYPE_ITOF: /* fake */
8786 return EV4_IB1;
8787
8788 default:
8789 gcc_unreachable ();
8790 }
8791 }
8792
8793 static enum alphaev5_pipe
8794 alphaev5_insn_pipe (rtx insn)
8795 {
8796 if (recog_memoized (insn) < 0)
8797 return EV5_STOP;
8798 if (get_attr_length (insn) != 4)
8799 return EV5_STOP;
8800
8801 switch (get_attr_type (insn))
8802 {
8803 case TYPE_ILD:
8804 case TYPE_FLD:
8805 case TYPE_LDSYM:
8806 case TYPE_IADD:
8807 case TYPE_ILOG:
8808 case TYPE_ICMOV:
8809 case TYPE_ICMP:
8810 return EV5_E01;
8811
8812 case TYPE_IST:
8813 case TYPE_FST:
8814 case TYPE_SHIFT:
8815 case TYPE_IMUL:
8816 case TYPE_MISC:
8817 case TYPE_MVI:
8818 case TYPE_LD_L:
8819 case TYPE_ST_C:
8820 case TYPE_MB:
8821 case TYPE_FTOI: /* fake */
8822 case TYPE_ITOF: /* fake */
8823 return EV5_E0;
8824
8825 case TYPE_IBR:
8826 case TYPE_JSR:
8827 case TYPE_CALLPAL:
8828 return EV5_E1;
8829
8830 case TYPE_FCPYS:
8831 return EV5_FAM;
8832
8833 case TYPE_FBR:
8834 case TYPE_FCMOV:
8835 case TYPE_FADD:
8836 case TYPE_FDIV:
8837 case TYPE_FSQRT: /* fake */
8838 return EV5_FA;
8839
8840 case TYPE_FMUL:
8841 return EV5_FM;
8842
8843 default:
8844 gcc_unreachable ();
8845 }
8846 }
8847
8848 /* IN_USE is a mask of the slots currently filled within the insn group.
8849 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8850 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8851
8852 LEN is, of course, the length of the group in bytes. */
8853
8854 static rtx
8855 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8856 {
8857 int len, in_use;
8858
8859 len = in_use = 0;
8860
8861 if (! INSN_P (insn)
8862 || GET_CODE (PATTERN (insn)) == CLOBBER
8863 || GET_CODE (PATTERN (insn)) == USE)
8864 goto next_and_done;
8865
8866 while (1)
8867 {
8868 enum alphaev4_pipe pipe;
8869
8870 pipe = alphaev4_insn_pipe (insn);
8871 switch (pipe)
8872 {
8873 case EV4_STOP:
8874 /* Force complex instructions to start new groups. */
8875 if (in_use)
8876 goto done;
8877
8878 /* If this is a completely unrecognized insn, it's an asm.
8879 We don't know how long it is, so record length as -1 to
8880 signal a needed realignment. */
8881 if (recog_memoized (insn) < 0)
8882 len = -1;
8883 else
8884 len = get_attr_length (insn);
8885 goto next_and_done;
8886
8887 case EV4_IBX:
8888 if (in_use & EV4_IB0)
8889 {
8890 if (in_use & EV4_IB1)
8891 goto done;
8892 in_use |= EV4_IB1;
8893 }
8894 else
8895 in_use |= EV4_IB0 | EV4_IBX;
8896 break;
8897
8898 case EV4_IB0:
8899 if (in_use & EV4_IB0)
8900 {
8901 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8902 goto done;
8903 in_use |= EV4_IB1;
8904 }
8905 in_use |= EV4_IB0;
8906 break;
8907
8908 case EV4_IB1:
8909 if (in_use & EV4_IB1)
8910 goto done;
8911 in_use |= EV4_IB1;
8912 break;
8913
8914 default:
8915 gcc_unreachable ();
8916 }
8917 len += 4;
8918
8919 /* Haifa doesn't do well scheduling branches. */
8920 if (GET_CODE (insn) == JUMP_INSN)
8921 goto next_and_done;
8922
8923 next:
8924 insn = next_nonnote_insn (insn);
8925
8926 if (!insn || ! INSN_P (insn))
8927 goto done;
8928
8929 /* Let Haifa tell us where it thinks insn group boundaries are. */
8930 if (GET_MODE (insn) == TImode)
8931 goto done;
8932
8933 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
8934 goto next;
8935 }
8936
8937 next_and_done:
8938 insn = next_nonnote_insn (insn);
8939
8940 done:
8941 *plen = len;
8942 *pin_use = in_use;
8943 return insn;
8944 }
8945
8946 /* IN_USE is a mask of the slots currently filled within the insn group.
8947 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
8948 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
8949
8950 LEN is, of course, the length of the group in bytes. */
8951
8952 static rtx
8953 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
8954 {
8955 int len, in_use;
8956
8957 len = in_use = 0;
8958
8959 if (! INSN_P (insn)
8960 || GET_CODE (PATTERN (insn)) == CLOBBER
8961 || GET_CODE (PATTERN (insn)) == USE)
8962 goto next_and_done;
8963
8964 while (1)
8965 {
8966 enum alphaev5_pipe pipe;
8967
8968 pipe = alphaev5_insn_pipe (insn);
8969 switch (pipe)
8970 {
8971 case EV5_STOP:
8972 /* Force complex instructions to start new groups. */
8973 if (in_use)
8974 goto done;
8975
8976 /* If this is a completely unrecognized insn, it's an asm.
8977 We don't know how long it is, so record length as -1 to
8978 signal a needed realignment. */
8979 if (recog_memoized (insn) < 0)
8980 len = -1;
8981 else
8982 len = get_attr_length (insn);
8983 goto next_and_done;
8984
8985 /* ??? Most of the places below, we would like to assert never
8986 happen, as it would indicate an error either in Haifa, or
8987 in the scheduling description. Unfortunately, Haifa never
8988 schedules the last instruction of the BB, so we don't have
8989 an accurate TI bit to go off. */
8990 case EV5_E01:
8991 if (in_use & EV5_E0)
8992 {
8993 if (in_use & EV5_E1)
8994 goto done;
8995 in_use |= EV5_E1;
8996 }
8997 else
8998 in_use |= EV5_E0 | EV5_E01;
8999 break;
9000
9001 case EV5_E0:
9002 if (in_use & EV5_E0)
9003 {
9004 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9005 goto done;
9006 in_use |= EV5_E1;
9007 }
9008 in_use |= EV5_E0;
9009 break;
9010
9011 case EV5_E1:
9012 if (in_use & EV5_E1)
9013 goto done;
9014 in_use |= EV5_E1;
9015 break;
9016
9017 case EV5_FAM:
9018 if (in_use & EV5_FA)
9019 {
9020 if (in_use & EV5_FM)
9021 goto done;
9022 in_use |= EV5_FM;
9023 }
9024 else
9025 in_use |= EV5_FA | EV5_FAM;
9026 break;
9027
9028 case EV5_FA:
9029 if (in_use & EV5_FA)
9030 goto done;
9031 in_use |= EV5_FA;
9032 break;
9033
9034 case EV5_FM:
9035 if (in_use & EV5_FM)
9036 goto done;
9037 in_use |= EV5_FM;
9038 break;
9039
9040 case EV5_NONE:
9041 break;
9042
9043 default:
9044 gcc_unreachable ();
9045 }
9046 len += 4;
9047
9048 /* Haifa doesn't do well scheduling branches. */
9049 /* ??? If this is predicted not-taken, slotting continues, except
9050 that no more IBR, FBR, or JSR insns may be slotted. */
9051 if (GET_CODE (insn) == JUMP_INSN)
9052 goto next_and_done;
9053
9054 next:
9055 insn = next_nonnote_insn (insn);
9056
9057 if (!insn || ! INSN_P (insn))
9058 goto done;
9059
9060 /* Let Haifa tell us where it thinks insn group boundaries are. */
9061 if (GET_MODE (insn) == TImode)
9062 goto done;
9063
9064 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9065 goto next;
9066 }
9067
9068 next_and_done:
9069 insn = next_nonnote_insn (insn);
9070
9071 done:
9072 *plen = len;
9073 *pin_use = in_use;
9074 return insn;
9075 }
9076
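/* Return a nop insn for one of the still-free slots recorded in *PIN_USE
   of the current EV4 group, marking that slot as used: prefer an integer
   nop, fall back to fnop when only the floating-capable slot remains, and
   to unop as a last resort.  */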
9077 static rtx
9078 alphaev4_next_nop (int *pin_use)
9079 {
9080 int in_use = *pin_use;
9081 rtx nop;
9082
9083 if (!(in_use & EV4_IB0))
9084 {
9085 in_use |= EV4_IB0;
9086 nop = gen_nop ();
9087 }
9088 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9089 {
9090 in_use |= EV4_IB1;
9091 nop = gen_nop ();
9092 }
9093 else if (TARGET_FP && !(in_use & EV4_IB1))
9094 {
9095 in_use |= EV4_IB1;
9096 nop = gen_fnop ();
9097 }
9098 else
9099 nop = gen_unop ();
9100
9101 *pin_use = in_use;
9102 return nop;
9103 }
9104
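/* As above, but for EV5: fill a free E1, FA, or FM slot, preferring an
   integer nop and falling back to fnop and finally unop.  */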
9105 static rtx
9106 alphaev5_next_nop (int *pin_use)
9107 {
9108 int in_use = *pin_use;
9109 rtx nop;
9110
9111 if (!(in_use & EV5_E1))
9112 {
9113 in_use |= EV5_E1;
9114 nop = gen_nop ();
9115 }
9116 else if (TARGET_FP && !(in_use & EV5_FA))
9117 {
9118 in_use |= EV5_FA;
9119 nop = gen_fnop ();
9120 }
9121 else if (TARGET_FP && !(in_use & EV5_FM))
9122 {
9123 in_use |= EV5_FM;
9124 nop = gen_fnop ();
9125 }
9126 else
9127 nop = gen_unop ();
9128
9129 *pin_use = in_use;
9130 return nop;
9131 }
9132
9133 /* The instruction group alignment main loop. */
9134
9135 static void
9136 alpha_align_insns (unsigned int max_align,
9137 rtx (*next_group) (rtx, int *, int *),
9138 rtx (*next_nop) (int *))
9139 {
9140 /* ALIGN is the known alignment for the insn group. */
9141 unsigned int align;
9142 /* OFS is the offset of the current insn in the insn group. */
9143 int ofs;
9144 int prev_in_use, in_use, len, ldgp;
9145 rtx i, next;
9146
9147 /* Let shorten_branches take care of assigning alignments to code labels. */
9148 shorten_branches (get_insns ());
9149
9150 if (align_functions < 4)
9151 align = 4;
9152 else if ((unsigned int) align_functions < max_align)
9153 align = align_functions;
9154 else
9155 align = max_align;
9156
9157 ofs = prev_in_use = 0;
9158 i = get_insns ();
9159 if (GET_CODE (i) == NOTE)
9160 i = next_nonnote_insn (i);
9161
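  /* The prologue ldgp sequence expands to an 8-byte ldah/lda pair; we must
     not insert padding inside it (see the check below).  */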
9162 ldgp = alpha_function_needs_gp ? 8 : 0;
9163
9164 while (i)
9165 {
9166 next = (*next_group) (i, &in_use, &len);
9167
9168 /* When we see a label, resync alignment etc. */
9169 if (GET_CODE (i) == CODE_LABEL)
9170 {
9171 unsigned int new_align = 1 << label_to_alignment (i);
9172
9173 if (new_align >= align)
9174 {
9175 align = new_align < max_align ? new_align : max_align;
9176 ofs = 0;
9177 }
9178
9179 else if (ofs & (new_align-1))
9180 ofs = (ofs | (new_align-1)) + 1;
9181 gcc_assert (!len);
9182 }
9183
9184 /* Handle complex instructions specially. */
9185 else if (in_use == 0)
9186 {
9187 /* Asms will have length < 0. This is a signal that we have
9188 lost alignment knowledge. Assume, however, that the asm
9189 will not mis-align instructions. */
9190 if (len < 0)
9191 {
9192 ofs = 0;
9193 align = 4;
9194 len = 0;
9195 }
9196 }
9197
9198 /* If the known alignment is smaller than the recognized insn group,
9199 realign the output. */
9200 else if ((int) align < len)
9201 {
9202 unsigned int new_log_align = len > 8 ? 4 : 3;
9203 rtx prev, where;
9204
9205 where = prev = prev_nonnote_insn (i);
9206 if (!where || GET_CODE (where) != CODE_LABEL)
9207 where = i;
9208
9209 /* Can't realign between a call and its gp reload. */
9210 if (! (TARGET_EXPLICIT_RELOCS
9211 && prev && GET_CODE (prev) == CALL_INSN))
9212 {
9213 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9214 align = 1 << new_log_align;
9215 ofs = 0;
9216 }
9217 }
9218
9219 /* We may not insert padding inside the initial ldgp sequence. */
9220 else if (ldgp > 0)
9221 ldgp -= len;
9222
9223 /* If the group won't fit in the same INT16 as the previous,
9224 we need to add padding to keep the group together. Rather
9225 than simply leaving the insn filling to the assembler, we
9226 can make use of the knowledge of what sorts of instructions
9227 were issued in the previous group to make sure that all of
9228 the added nops are really free. */
9229 else if (ofs + len > (int) align)
9230 {
9231 int nop_count = (align - ofs) / 4;
9232 rtx where;
9233
9234 /* Insert nops before labels, branches, and calls to truly merge
9235 the execution of the nops with the previous instruction group. */
9236 where = prev_nonnote_insn (i);
9237 if (where)
9238 {
9239 if (GET_CODE (where) == CODE_LABEL)
9240 {
9241 rtx where2 = prev_nonnote_insn (where);
9242 if (where2 && GET_CODE (where2) == JUMP_INSN)
9243 where = where2;
9244 }
9245 else if (GET_CODE (where) == INSN)
9246 where = i;
9247 }
9248 else
9249 where = i;
9250
9251 do
9252 emit_insn_before ((*next_nop)(&prev_in_use), where);
9253 while (--nop_count);
9254 ofs = 0;
9255 }
9256
9257 ofs = (ofs + len) & (align - 1);
9258 prev_in_use = in_use;
9259 i = next;
9260 }
9261 }
9262 \f
9263 /* Machine dependent reorg pass. */
9264
9265 static void
9266 alpha_reorg (void)
9267 {
9268 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9269 alpha_handle_trap_shadows ();
9270
9271 /* Due to the number of extra trapb insns, don't bother fixing up
9272 alignment when trap precision is instruction. Moreover, we can
9273 only do our job when sched2 is run. */
9274 if (optimize && !optimize_size
9275 && alpha_tp != ALPHA_TP_INSN
9276 && flag_schedule_insns_after_reload)
9277 {
9278 if (alpha_tune == PROCESSOR_EV4)
9279 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9280 else if (alpha_tune == PROCESSOR_EV5)
9281 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9282 }
9283 }
9284 \f
9285 #if !TARGET_ABI_UNICOSMK
9286
9287 #ifdef HAVE_STAMP_H
9288 #include <stamp.h>
9289 #endif
9290
9291 static void
9292 alpha_file_start (void)
9293 {
9294 #ifdef OBJECT_FORMAT_ELF
9295 /* If emitting dwarf2 debug information, we cannot generate a .file
9296 directive to start the file, as it will conflict with dwarf2out
9297 file numbers. So it's only useful when emitting mdebug output. */
9298 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9299 #endif
9300
9301 default_file_start ();
9302 #ifdef MS_STAMP
9303 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9304 #endif
9305
9306 fputs ("\t.set noreorder\n", asm_out_file);
9307 fputs ("\t.set volatile\n", asm_out_file);
9308 if (!TARGET_ABI_OPEN_VMS)
9309 fputs ("\t.set noat\n", asm_out_file);
9310 if (TARGET_EXPLICIT_RELOCS)
9311 fputs ("\t.set nomacro\n", asm_out_file);
9312 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9313 {
9314 const char *arch;
9315
9316 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9317 arch = "ev6";
9318 else if (TARGET_MAX)
9319 arch = "pca56";
9320 else if (TARGET_BWX)
9321 arch = "ev56";
9322 else if (alpha_cpu == PROCESSOR_EV5)
9323 arch = "ev5";
9324 else
9325 arch = "ev4";
9326
9327 fprintf (asm_out_file, "\t.arch %s\n", arch);
9328 }
9329 }
9330 #endif
9331
9332 #ifdef OBJECT_FORMAT_ELF
9333 /* Since we don't have a .dynbss section, we should not allow global
9334 relocations in the .rodata section. */
9335
9336 static int
9337 alpha_elf_reloc_rw_mask (void)
9338 {
9339 return flag_pic ? 3 : 2;
9340 }
9341
9342 /* Return a section for X. The only special thing we do here is to
9343 honor small data. */
9344
9345 static section *
9346 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9347 unsigned HOST_WIDE_INT align)
9348 {
9349 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9350 /* ??? Consider using mergeable sdata sections. */
9351 return sdata_section;
9352 else
9353 return default_elf_select_rtx_section (mode, x, align);
9354 }
9355
9356 static unsigned int
9357 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9358 {
9359 unsigned int flags = 0;
9360
9361 if (strcmp (name, ".sdata") == 0
9362 || strncmp (name, ".sdata.", 7) == 0
9363 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9364 || strcmp (name, ".sbss") == 0
9365 || strncmp (name, ".sbss.", 6) == 0
9366 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9367 flags = SECTION_SMALL;
9368
9369 flags |= default_section_type_flags (decl, name, reloc);
9370 return flags;
9371 }
9372 #endif /* OBJECT_FORMAT_ELF */
9373 \f
9374 /* Structure to collect function names for final output in link section. */
9375 /* Note that items marked with GTY can't be ifdef'ed out. */
9376
9377 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9378 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9379
9380 struct alpha_links GTY(())
9381 {
9382 int num;
9383 rtx linkage;
9384 enum links_kind lkind;
9385 enum reloc_kind rkind;
9386 };
9387
9388 struct alpha_funcs GTY(())
9389 {
9390 int num;
9391 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9392 links;
9393 };
9394
9395 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9396 splay_tree alpha_links_tree;
9397 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9398 splay_tree alpha_funcs_tree;
9399
9400 static GTY(()) int alpha_funcs_num;
9401
9402 #if TARGET_ABI_OPEN_VMS
9403
9404 /* Return the VMS argument type corresponding to MODE. */
9405
9406 enum avms_arg_type
9407 alpha_arg_type (enum machine_mode mode)
9408 {
9409 switch (mode)
9410 {
9411 case SFmode:
9412 return TARGET_FLOAT_VAX ? FF : FS;
9413 case DFmode:
9414 return TARGET_FLOAT_VAX ? FD : FT;
9415 default:
9416 return I64;
9417 }
9418 }
9419
9420 /* Return an rtx for an integer representing the VMS Argument Information
9421 register value. */
9422
9423 rtx
9424 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9425 {
9426 unsigned HOST_WIDE_INT regval = cum.num_args;
9427 int i;
9428
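  /* The AI register holds the argument count in its low bits and one 3-bit
     type code per argument starting at bit 8; e.g. two arguments with type
     codes T0 and T1 give 2 | T0 << 8 | T1 << 11.  */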
9429 for (i = 0; i < 6; i++)
9430 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9431
9432 return GEN_INT (regval);
9433 }
9434 \f
9435 /* Make (or fake) .linkage entry for function call.
9436
9437 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9438
9439 Return a SYMBOL_REF rtx for the linkage. */
9440
9441 rtx
9442 alpha_need_linkage (const char *name, int is_local)
9443 {
9444 splay_tree_node node;
9445 struct alpha_links *al;
9446
9447 if (name[0] == '*')
9448 name++;
9449
9450 if (is_local)
9451 {
9452 struct alpha_funcs *cfaf;
9453
9454 if (!alpha_funcs_tree)
9455 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9456 splay_tree_compare_pointers);
9457
9458 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9459
9460 cfaf->links = 0;
9461 cfaf->num = ++alpha_funcs_num;
9462
9463 splay_tree_insert (alpha_funcs_tree,
9464 (splay_tree_key) current_function_decl,
9465 (splay_tree_value) cfaf);
9466 }
9467
9468 if (alpha_links_tree)
9469 {
9470 /* Is this name already defined? */
9471
9472 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9473 if (node)
9474 {
9475 al = (struct alpha_links *) node->value;
9476 if (is_local)
9477 {
9478 /* Defined here but external assumed. */
9479 if (al->lkind == KIND_EXTERN)
9480 al->lkind = KIND_LOCAL;
9481 }
9482 else
9483 {
9484 /* Used here but unused assumed. */
9485 if (al->lkind == KIND_UNUSED)
9486 al->lkind = KIND_LOCAL;
9487 }
9488 return al->linkage;
9489 }
9490 }
9491 else
9492 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9493
9494 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9495 name = ggc_strdup (name);
9496
9497 /* Assume external if no definition. */
9498 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9499
9500 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9501 get_identifier (name);
9502
9503 /* Construct a SYMBOL_REF for us to call. */
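  /* E.g. for NAME "foo" this builds the symbol "$foo..lk".  */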
9504 {
9505 size_t name_len = strlen (name);
9506 char *linksym = alloca (name_len + 6);
9507 linksym[0] = '$';
9508 memcpy (linksym + 1, name, name_len);
9509 memcpy (linksym + 1 + name_len, "..lk", 5);
9510 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9511 ggc_alloc_string (linksym, name_len + 5));
9512 }
9513
9514 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9515 (splay_tree_value) al);
9516
9517 return al->linkage;
9518 }
9519
9520 rtx
9521 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9522 {
9523 splay_tree_node cfunnode;
9524 struct alpha_funcs *cfaf;
9525 struct alpha_links *al;
9526 const char *name = XSTR (linkage, 0);
9527
9528 cfaf = (struct alpha_funcs *) 0;
9529 al = (struct alpha_links *) 0;
9530
9531 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9532 cfaf = (struct alpha_funcs *) cfunnode->value;
9533
9534 if (cfaf->links)
9535 {
9536 splay_tree_node lnode;
9537
9538 /* Is this name already defined? */
9539
9540 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9541 if (lnode)
9542 al = (struct alpha_links *) lnode->value;
9543 }
9544 else
9545 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9546
9547 if (!al)
9548 {
9549 size_t name_len;
9550 size_t buflen;
9551 char buf [512];
9552 char *linksym;
9553 splay_tree_node node = 0;
9554 struct alpha_links *anl;
9555
9556 if (name[0] == '*')
9557 name++;
9558
9559 name_len = strlen (name);
9560
9561 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9562 al->num = cfaf->num;
9563
9564 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9565 if (node)
9566 {
9567 anl = (struct alpha_links *) node->value;
9568 al->lkind = anl->lkind;
9569 }
9570
9571 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9572 buflen = strlen (buf);
9573 linksym = alloca (buflen + 1);
9574 memcpy (linksym, buf, buflen + 1);
9575
9576 al->linkage = gen_rtx_SYMBOL_REF
9577 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9578
9579 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9580 (splay_tree_value) al);
9581 }
9582
9583 if (rflag)
9584 al->rkind = KIND_CODEADDR;
9585 else
9586 al->rkind = KIND_LINKAGE;
9587
9588 if (lflag)
9589 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9590 else
9591 return al->linkage;
9592 }
9593
9594 static int
9595 alpha_write_one_linkage (splay_tree_node node, void *data)
9596 {
9597 const char *const name = (const char *) node->key;
9598 struct alpha_links *link = (struct alpha_links *) node->value;
9599 FILE *stream = (FILE *) data;
9600
9601 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9602 if (link->rkind == KIND_CODEADDR)
9603 {
9604 if (link->lkind == KIND_LOCAL)
9605 {
9606 /* Local and used */
9607 fprintf (stream, "\t.quad %s..en\n", name);
9608 }
9609 else
9610 {
9611 /* External and used, request code address. */
9612 fprintf (stream, "\t.code_address %s\n", name);
9613 }
9614 }
9615 else
9616 {
9617 if (link->lkind == KIND_LOCAL)
9618 {
9619 /* Local and used, build linkage pair. */
9620 fprintf (stream, "\t.quad %s..en\n", name);
9621 fprintf (stream, "\t.quad %s\n", name);
9622 }
9623 else
9624 {
9625 /* External and used, request linkage pair. */
9626 fprintf (stream, "\t.linkage %s\n", name);
9627 }
9628 }
9629
9630 return 0;
9631 }
9632
9633 static void
9634 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9635 {
9636 splay_tree_node node;
9637 struct alpha_funcs *func;
9638
9639 fprintf (stream, "\t.link\n");
9640 fprintf (stream, "\t.align 3\n");
9641 in_section = NULL;
9642
9643 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9644 func = (struct alpha_funcs *) node->value;
9645
9646 fputs ("\t.name ", stream);
9647 assemble_name (stream, funname);
9648 fputs ("..na\n", stream);
9649 ASM_OUTPUT_LABEL (stream, funname);
9650 fprintf (stream, "\t.pdesc ");
9651 assemble_name (stream, funname);
9652 fprintf (stream, "..en,%s\n",
9653 alpha_procedure_type == PT_STACK ? "stack"
9654 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9655
9656 if (func->links)
9657 {
9658 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9659 /* splay_tree_delete (func->links); */
9660 }
9661 }
9662
9663 /* Given a decl, a section name, and whether the decl initializer
9664 has relocs, choose attributes for the section. */
9665
9666 #define SECTION_VMS_OVERLAY SECTION_FORGET
9667 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9668 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9669
9670 static unsigned int
9671 vms_section_type_flags (tree decl, const char *name, int reloc)
9672 {
9673 unsigned int flags = default_section_type_flags (decl, name, reloc);
9674
9675 if (decl && DECL_ATTRIBUTES (decl)
9676 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9677 flags |= SECTION_VMS_OVERLAY;
9678 if (decl && DECL_ATTRIBUTES (decl)
9679 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9680 flags |= SECTION_VMS_GLOBAL;
9681 if (decl && DECL_ATTRIBUTES (decl)
9682 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9683 flags |= SECTION_VMS_INITIALIZE;
9684
9685 return flags;
9686 }
9687
9688 /* Switch to an arbitrary section NAME with attributes as specified
9689 by FLAGS. ALIGN specifies any known alignment requirements for
9690 the section; 0 if the default should be used. */
9691
9692 static void
9693 vms_asm_named_section (const char *name, unsigned int flags,
9694 tree decl ATTRIBUTE_UNUSED)
9695 {
9696 fputc ('\n', asm_out_file);
9697 fprintf (asm_out_file, ".section\t%s", name);
9698
9699 if (flags & SECTION_VMS_OVERLAY)
9700 fprintf (asm_out_file, ",OVR");
9701 if (flags & SECTION_VMS_GLOBAL)
9702 fprintf (asm_out_file, ",GBL");
9703 if (flags & SECTION_VMS_INITIALIZE)
9704 fprintf (asm_out_file, ",NOMOD");
9705 if (flags & SECTION_DEBUG)
9706 fprintf (asm_out_file, ",NOWRT");
9707
9708 fputc ('\n', asm_out_file);
9709 }
9710
9711 /* Record an element in the table of global constructors. SYMBOL is
9712 a SYMBOL_REF of the function to be called; PRIORITY is a number
9713 between 0 and MAX_INIT_PRIORITY.
9714
9715 Differs from default_ctors_section_asm_out_constructor in that the
9716 width of the .ctors entry is always 64 bits, rather than the 32 bits
9717 used by a normal pointer. */
9718
9719 static void
9720 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9721 {
9722 switch_to_section (ctors_section);
9723 assemble_align (BITS_PER_WORD);
9724 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9725 }
9726
9727 static void
9728 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9729 {
9730 switch_to_section (dtors_section);
9731 assemble_align (BITS_PER_WORD);
9732 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9733 }
9734 #else
9735
9736 rtx
9737 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9738 int is_local ATTRIBUTE_UNUSED)
9739 {
9740 return NULL_RTX;
9741 }
9742
9743 rtx
9744 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9745 tree cfundecl ATTRIBUTE_UNUSED,
9746 int lflag ATTRIBUTE_UNUSED,
9747 int rflag ATTRIBUTE_UNUSED)
9748 {
9749 return NULL_RTX;
9750 }
9751
9752 #endif /* TARGET_ABI_OPEN_VMS */
9753 \f
9754 #if TARGET_ABI_UNICOSMK
9755
9756 /* This evaluates to true if we do not know how to pass TYPE solely in
9757 registers. This is the case for all arguments that do not fit in two
9758 registers. */
9759
9760 static bool
9761 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9762 {
9763 if (type == NULL)
9764 return false;
9765
9766 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9767 return true;
9768 if (TREE_ADDRESSABLE (type))
9769 return true;
9770
9771 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9772 }
9773
9774 /* Define the offset between two registers, one to be eliminated, and the
9775 other its replacement, at the start of a routine. */
9776
9777 int
9778 unicosmk_initial_elimination_offset (int from, int to)
9779 {
9780 int fixed_size;
9781
9782 fixed_size = alpha_sa_size();
9783 if (fixed_size != 0)
9784 fixed_size += 48;
9785
9786 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9787 return -fixed_size;
9788 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9789 return 0;
9790 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9791 return (ALPHA_ROUND (current_function_outgoing_args_size)
9792 + ALPHA_ROUND (get_frame_size()));
9793 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9794 return (ALPHA_ROUND (fixed_size)
9795 + ALPHA_ROUND (get_frame_size()
9796 + current_function_outgoing_args_size));
9797 else
9798 gcc_unreachable ();
9799 }
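/* A worked example, assuming ALPHA_ROUND rounds up to a multiple of 16:
   with a 16-byte register save area, fixed_size = 16 + 48 = 64.  For a
   40-byte frame and 24 bytes of outgoing arguments this gives

     FRAME_POINTER -> HARD_FRAME_POINTER : -64
     ARG_POINTER   -> HARD_FRAME_POINTER :   0
     FRAME_POINTER -> STACK_POINTER      : ALPHA_ROUND (24) + ALPHA_ROUND (40) = 80
     ARG_POINTER   -> STACK_POINTER      : ALPHA_ROUND (64) + ALPHA_ROUND (64) = 128.  */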
9800
9801 /* Output the module name for .ident and .end directives. We have to strip
9802 directories and make sure that the module name starts with a letter
9803 or '$'. */
9804
9805 static void
9806 unicosmk_output_module_name (FILE *file)
9807 {
9808 const char *name = lbasename (main_input_filename);
9809 unsigned len = strlen (name);
9810 char *clean_name = alloca (len + 2);
9811 char *ptr = clean_name;
9812
9813 /* CAM only accepts module names that start with a letter or '$'. We
9814 prefix the module name with a '$' if necessary. */
9815
9816 if (!ISALPHA (*name))
9817 *ptr++ = '$';
9818 memcpy (ptr, name, len + 1);
9819 clean_symbol_name (clean_name);
9820 fputs (clean_name, file);
9821 }
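/* For example, compiling /src/90days.c yields the basename "90days.c",
   which does not start with a letter, so the emitted module name is
   "$90days.c", with any characters clean_symbol_name considers invalid
   replaced by '_'.  */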
9822
9823 /* Output the definition of a common variable. */
9824
9825 void
9826 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9827 {
9828 tree name_tree;
9829 printf ("T3E__: common %s\n", name);
9830
9831 in_section = NULL;
9832 fputs ("\t.endp\n\n\t.psect ", file);
9833 assemble_name (file, name);
9834 fprintf (file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9835 fprintf (file, "\t.byte\t0:%d\n", size);
9836
9837 /* Mark the symbol as defined in this module. */
9838 name_tree = get_identifier (name);
9839 TREE_ASM_WRITTEN (name_tree) = 1;
9840 }
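/* For example, a 40-byte common variable "foo" (a hypothetical name) with
   64-bit alignment (floor_log2 (64 / 8) == 3) is emitted roughly as

	.endp

	.psect foo,3,common
	.byte	0:40
 */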
9841
9842 #define SECTION_PUBLIC SECTION_MACH_DEP
9843 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9844 static int current_section_align;
9845
9846 /* A get_unnamed_section callback for switching to the text section. */
9847
9848 static void
9849 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9850 {
9851 static int count = 0;
9852 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9853 }
9854
9855 /* A get_unnamed_section callback for switching to the data section. */
9856
9857 static void
9858 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9859 {
9860 static int count = 1;
9861 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9862 }
9863
9864 /* Implement TARGET_ASM_INIT_SECTIONS.
9865
9866 The Cray assembler is really weird with respect to sections. It has only
9867 named sections and you can't reopen a section once it has been closed.
9868 This means that we have to generate unique names whenever we want to
9869 reenter the text or the data section. */
9870
9871 static void
9872 unicosmk_init_sections (void)
9873 {
9874 text_section = get_unnamed_section (SECTION_CODE,
9875 unicosmk_output_text_section_asm_op,
9876 NULL);
9877 data_section = get_unnamed_section (SECTION_WRITE,
9878 unicosmk_output_data_section_asm_op,
9879 NULL);
9880 readonly_data_section = data_section;
9881 }
9882
9883 static unsigned int
9884 unicosmk_section_type_flags (tree decl, const char *name,
9885 int reloc)
9886 {
9887 unsigned int flags = default_section_type_flags (decl, name, reloc);
9888
9889 if (!decl)
9890 return flags;
9891
9892 if (TREE_CODE (decl) == FUNCTION_DECL)
9893 {
9894 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9895 if (align_functions_log > current_section_align)
9896 current_section_align = align_functions_log;
9897
9898 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
9899 flags |= SECTION_MAIN;
9900 }
9901 else
9902 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
9903
9904 if (TREE_PUBLIC (decl))
9905 flags |= SECTION_PUBLIC;
9906
9907 return flags;
9908 }
9909
9910 /* Generate a section name for decl and associate it with the
9911 declaration. */
9912
9913 static void
9914 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
9915 {
9916 const char *name;
9917 int len;
9918
9919 gcc_assert (decl);
9920
9921 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9922 name = default_strip_name_encoding (name);
9923 len = strlen (name);
9924
9925 if (TREE_CODE (decl) == FUNCTION_DECL)
9926 {
9927 char *string;
9928
9929 /* It is essential that we prefix the section name here because
9930 otherwise the section names generated for constructors and
9931 destructors confuse collect2. */
9932
9933 string = alloca (len + 6);
9934 sprintf (string, "code@%s", name);
9935 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9936 }
9937 else if (TREE_PUBLIC (decl))
9938 DECL_SECTION_NAME (decl) = build_string (len, name);
9939 else
9940 {
9941 char *string;
9942
9943 string = alloca (len + 6);
9944 sprintf (string, "data@%s", name);
9945 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
9946 }
9947 }
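/* For a hypothetical identifier "bar" this yields the section name
   "code@bar" for a function, "bar" for a public variable, and "data@bar"
   for a non-public one.  */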
9948
9949 /* Switch to an arbitrary section NAME with attributes as specified
9950 by FLAGS. DECL is the declaration associated with the section, if
9951 any; it is not used here. */
9952
9953 static void
9954 unicosmk_asm_named_section (const char *name, unsigned int flags,
9955 tree decl ATTRIBUTE_UNUSED)
9956 {
9957 const char *kind;
9958
9959 /* Close the previous section. */
9960
9961 fputs ("\t.endp\n\n", asm_out_file);
9962
9963 /* Find out what kind of section we are opening. */
9964
9965 if (flags & SECTION_MAIN)
9966 fputs ("\t.start\tmain\n", asm_out_file);
9967
9968 if (flags & SECTION_CODE)
9969 kind = "code";
9970 else if (flags & SECTION_PUBLIC)
9971 kind = "common";
9972 else
9973 kind = "data";
9974
9975 if (current_section_align != 0)
9976 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
9977 current_section_align, kind);
9978 else
9979 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
9980 }
9981
9982 static void
9983 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
9984 {
9985 if (DECL_P (decl)
9986 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
9987 unicosmk_unique_section (decl, 0);
9988 }
9989
9990 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
9991 in code sections because .align fills unused space with zeroes. */
9992
9993 void
9994 unicosmk_output_align (FILE *file, int align)
9995 {
9996 if (inside_function)
9997 fprintf (file, "\tgcc@code@align\t%d\n", align);
9998 else
9999 fprintf (file, "\t.align\t%d\n", align);
10000 }
10001
10002 /* Add a case vector to the current function's list of deferred case
10003 vectors. Case vectors have to be put into a separate section because CAM
10004 does not allow data definitions in code sections. */
10005
10006 void
10007 unicosmk_defer_case_vector (rtx lab, rtx vec)
10008 {
10009 struct machine_function *machine = cfun->machine;
10010
10011 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10012 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10013 machine->addr_list);
10014 }
10015
10016 /* Output a case vector. */
10017
10018 static void
10019 unicosmk_output_addr_vec (FILE *file, rtx vec)
10020 {
10021 rtx lab = XEXP (vec, 0);
10022 rtx body = XEXP (vec, 1);
10023 int vlen = XVECLEN (body, 0);
10024 int idx;
10025
10026 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10027
10028 for (idx = 0; idx < vlen; idx++)
10029 {
10030 ASM_OUTPUT_ADDR_VEC_ELT
10031 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10032 }
10033 }
10034
10035 /* Output current function's deferred case vectors. */
10036
10037 static void
10038 unicosmk_output_deferred_case_vectors (FILE *file)
10039 {
10040 struct machine_function *machine = cfun->machine;
10041 rtx t;
10042
10043 if (machine->addr_list == NULL_RTX)
10044 return;
10045
10046 switch_to_section (data_section);
10047 for (t = machine->addr_list; t; t = XEXP (t, 1))
10048 unicosmk_output_addr_vec (file, XEXP (t, 0));
10049 }
10050
10051 /* Generate the name of the SSIB section for the current function. */
10052
10053 #define SSIB_PREFIX "__SSIB_"
10054 #define SSIB_PREFIX_LEN 7
10055
10056 static const char *
10057 unicosmk_ssib_name (void)
10058 {
10059 /* This is ok since CAM won't be able to deal with names longer than that
10060 anyway. */
10061
10062 static char name[256];
10063
10064 rtx x;
10065 const char *fnname;
10066 int len;
10067
10068 x = DECL_RTL (cfun->decl);
10069 gcc_assert (GET_CODE (x) == MEM);
10070 x = XEXP (x, 0);
10071 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10072 fnname = XSTR (x, 0);
10073
10074 len = strlen (fnname);
10075 if (len + SSIB_PREFIX_LEN > 255)
10076 len = 255 - SSIB_PREFIX_LEN;
10077
10078 strcpy (name, SSIB_PREFIX);
10079 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10080 name[len + SSIB_PREFIX_LEN] = 0;
10081
10082 return name;
10083 }
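/* For example, a function named "main" gets the SSIB name "__SSIB_main";
   longer names are truncated so that the result never exceeds 255
   characters.  */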
10084
10085 /* Set up the dynamic subprogram information block (DSIB) and update the
10086 frame pointer register ($15) for subroutines which have a frame. If the
10087 subroutine doesn't have a frame, simply increment $15. */
10088
10089 static void
10090 unicosmk_gen_dsib (unsigned long *imaskP)
10091 {
10092 if (alpha_procedure_type == PT_STACK)
10093 {
10094 const char *ssib_name;
10095 rtx mem;
10096
10097 /* Allocate 64 bytes for the DSIB. */
10098
10099 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10100 GEN_INT (-64))));
10101 emit_insn (gen_blockage ());
10102
10103 /* Save the return address. */
10104
10105 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10106 set_mem_alias_set (mem, alpha_sr_alias_set);
10107 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10108 (*imaskP) &= ~(1UL << REG_RA);
10109
10110 /* Save the old frame pointer. */
10111
10112 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10113 set_mem_alias_set (mem, alpha_sr_alias_set);
10114 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10115 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10116
10117 emit_insn (gen_blockage ());
10118
10119 /* Store the SSIB pointer. */
10120
10121 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10122 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10123 set_mem_alias_set (mem, alpha_sr_alias_set);
10124
10125 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10126 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10127 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10128
10129 /* Save the CIW index. */
10130
10131 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10132 set_mem_alias_set (mem, alpha_sr_alias_set);
10133 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10134
10135 emit_insn (gen_blockage ());
10136
10137 /* Set the new frame pointer. */
10138
10139 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10140 stack_pointer_rtx, GEN_INT (64))));
10141
10142 }
10143 else
10144 {
10145 /* Increment the frame pointer register to indicate that we do not
10146 have a frame. */
10147
10148 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10149 hard_frame_pointer_rtx, const1_rtx)));
10150 }
10151 }
10152
10153 /* Output the static subroutine information block for the current
10154 function. */
10155
10156 static void
10157 unicosmk_output_ssib (FILE *file, const char *fnname)
10158 {
10159 int len;
10160 int i;
10161 rtx x;
10162 rtx ciw;
10163 struct machine_function *machine = cfun->machine;
10164
10165 in_section = NULL;
10166 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10167 unicosmk_ssib_name ());
10168
10169 /* Some required stuff and the function name length. */
10170
10171 len = strlen (fnname);
10172 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10173
10174 /* Saved registers
10175 ??? We don't do that yet. */
10176
10177 fputs ("\t.quad\t0\n", file);
10178
10179 /* Function address. */
10180
10181 fputs ("\t.quad\t", file);
10182 assemble_name (file, fnname);
10183 putc ('\n', file);
10184
10185 fputs ("\t.quad\t0\n", file);
10186 fputs ("\t.quad\t0\n", file);
10187
10188 /* Function name.
10189 ??? We do it the same way Cray CC does it but this could be
10190 simplified. */
10191
10192 for (i = 0; i < len; i++)
10193 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10194 if ((len % 8) == 0)
10195 fputs ("\t.quad\t0\n", file);
10196 else
10197 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8)) * 8);
10198
10199 /* All call information words used in the function. */
10200
10201 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10202 {
10203 ciw = XEXP (x, 0);
10204 #if HOST_BITS_PER_WIDE_INT == 32
10205 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10206 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10207 #else
10208 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10209 #endif
10210 }
10211 }
10212
10213 /* Add a call information word (CIW) to the list of the current function's
10214 CIWs and return its index.
10215
10216 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10217
10218 rtx
10219 unicosmk_add_call_info_word (rtx x)
10220 {
10221 rtx node;
10222 struct machine_function *machine = cfun->machine;
10223
10224 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10225 if (machine->first_ciw == NULL_RTX)
10226 machine->first_ciw = node;
10227 else
10228 XEXP (machine->last_ciw, 1) = node;
10229
10230 machine->last_ciw = node;
10231 ++machine->ciw_count;
10232
10233 return GEN_INT (machine->ciw_count
10234 + strlen (current_function_name ())/8 + 5);
10235 }
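/* A worked example: in the SSIB emitted by unicosmk_output_ssib above, the
   CIW table is preceded by five quadwords (the length word, the
   saved-registers word, the function address and two zero words) plus the
   quadwords holding the function name.  For a function named "main"
   (length 4, one name quadword) the first CIW is therefore returned as
   1 + 4/8 + 5 = 6, its quadword offset from the start of the SSIB.  */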
10236
10237 /* The Cray assembler doesn't accept extern declarations for symbols which
10238 are defined in the same file. We have to keep track of all global
10239 symbols which are referenced and/or defined in a source file and, at
10240 the end of the file, output extern declarations for those which are
10241 referenced but not defined. */
10242
10243 /* List of identifiers for which an extern declaration might have to be
10244 emitted. */
10245 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10246
10247 struct unicosmk_extern_list
10248 {
10249 struct unicosmk_extern_list *next;
10250 const char *name;
10251 };
10252
10253 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10254
10255 /* Output extern declarations which are required for every asm file. */
10256
10257 static void
10258 unicosmk_output_default_externs (FILE *file)
10259 {
10260 static const char *const externs[] =
10261 { "__T3E_MISMATCH" };
10262
10263 int i;
10264 int n;
10265
10266 n = ARRAY_SIZE (externs);
10267
10268 for (i = 0; i < n; i++)
10269 fprintf (file, "\t.extern\t%s\n", externs[i]);
10270 }
10271
10272 /* Output extern declarations for global symbols which have been
10273 referenced but not defined. */
10274
10275 static void
10276 unicosmk_output_externs (FILE *file)
10277 {
10278 struct unicosmk_extern_list *p;
10279 const char *real_name;
10280 int len;
10281 tree name_tree;
10282
10283 len = strlen (user_label_prefix);
10284 for (p = unicosmk_extern_head; p != 0; p = p->next)
10285 {
10286 /* We have to strip the encoding and possibly remove user_label_prefix
10287 from the identifier in order to handle -fleading-underscore and
10288 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10289 real_name = default_strip_name_encoding (p->name);
10290 if (len && p->name[0] == '*'
10291 && !memcmp (real_name, user_label_prefix, len))
10292 real_name += len;
10293
10294 name_tree = get_identifier (real_name);
10295 if (! TREE_ASM_WRITTEN (name_tree))
10296 {
10297 TREE_ASM_WRITTEN (name_tree) = 1;
10298 fputs ("\t.extern\t", file);
10299 assemble_name (file, p->name);
10300 putc ('\n', file);
10301 }
10302 }
10303 }
10304
10305 /* Record an extern. */
10306
10307 void
10308 unicosmk_add_extern (const char *name)
10309 {
10310 struct unicosmk_extern_list *p;
10311
10312 p = (struct unicosmk_extern_list *)
10313 xmalloc (sizeof (struct unicosmk_extern_list));
10314 p->next = unicosmk_extern_head;
10315 p->name = name;
10316 unicosmk_extern_head = p;
10317 }
10318
10319 /* The Cray assembler generates incorrect code if identifiers which
10320 conflict with register names are used as instruction operands. We have
10321 to replace such identifiers with DEX expressions. */
10322
10323 /* Structure to collect identifiers which have been replaced by DEX
10324 expressions. */
10325 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10326
10327 struct unicosmk_dex {
10328 struct unicosmk_dex *next;
10329 const char *name;
10330 };
10331
10332 /* List of identifiers which have been replaced by DEX expressions. The DEX
10333 number is determined by the position in the list. */
10334
10335 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10336
10337 /* The number of elements in the DEX list. */
10338
10339 static int unicosmk_dex_count = 0;
10340
10341 /* Check if NAME must be replaced by a DEX expression. */
10342
10343 static int
10344 unicosmk_special_name (const char *name)
10345 {
10346 if (name[0] == '*')
10347 ++name;
10348
10349 if (name[0] == '$')
10350 ++name;
10351
10352 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10353 return 0;
10354
10355 switch (name[1])
10356 {
10357 case '1': case '2':
10358 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10359
10360 case '3':
10361 return (name[2] == '\0'
10362 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10363
10364 default:
10365 return (ISDIGIT (name[1]) && name[2] == '\0');
10366 }
10367 }
10368
10369 /* Return the DEX number if X must be replaced by a DEX expression and 0
10370 otherwise. */
10371
10372 static int
10373 unicosmk_need_dex (rtx x)
10374 {
10375 struct unicosmk_dex *dex;
10376 const char *name;
10377 int i;
10378
10379 if (GET_CODE (x) != SYMBOL_REF)
10380 return 0;
10381
10382 name = XSTR (x,0);
10383 if (! unicosmk_special_name (name))
10384 return 0;
10385
10386 i = unicosmk_dex_count;
10387 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10388 {
10389 if (! strcmp (name, dex->name))
10390 return i;
10391 --i;
10392 }
10393
10394 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10395 dex->name = name;
10396 dex->next = unicosmk_dex_list;
10397 unicosmk_dex_list = dex;
10398
10399 ++unicosmk_dex_count;
10400 return unicosmk_dex_count;
10401 }
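/* For example, a global whose assembler name is "f4" or "r31" collides
   with a CAM register name and is assigned a DEX number here; the first
   such name seen gets number 1, the second 2, and so on.
   unicosmk_output_dex below emits the matching "DEX (n) = name"
   definitions.  */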
10402
10403 /* Output the DEX definitions for this file. */
10404
10405 static void
10406 unicosmk_output_dex (FILE *file)
10407 {
10408 struct unicosmk_dex *dex;
10409 int i;
10410
10411 if (unicosmk_dex_list == NULL)
10412 return;
10413
10414 fprintf (file, "\t.dexstart\n");
10415
10416 i = unicosmk_dex_count;
10417 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10418 {
10419 fprintf (file, "\tDEX (%d) = ", i);
10420 assemble_name (file, dex->name);
10421 putc ('\n', file);
10422 --i;
10423 }
10424
10425 fprintf (file, "\t.dexend\n");
10426 }
10427
10428 /* Output text to appear at the beginning of an assembler file. */
10429
10430 static void
10431 unicosmk_file_start (void)
10432 {
10433 int i;
10434
10435 fputs ("\t.ident\t", asm_out_file);
10436 unicosmk_output_module_name (asm_out_file);
10437 fputs ("\n\n", asm_out_file);
10438
10439 /* The Unicos/Mk assembler uses different register names. Instead of trying
10440 to support them, we simply use micro definitions. */
10441
10442 /* CAM has different register names: rN for the integer register N and fN
10443 for the floating-point register N. Instead of trying to use these in
10444 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10445 register. */
10446
10447 for (i = 0; i < 32; ++i)
10448 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10449
10450 for (i = 0; i < 32; ++i)
10451 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10452
10453 putc ('\n', asm_out_file);
10454
10455 /* The .align directive fills unused space with zeroes, which does not work
10456 in code sections. We define the macro 'gcc@code@align' which uses nops
10457 instead. Note that it assumes that code sections always have the
10458 biggest possible alignment since . refers to the current offset from
10459 the beginning of the section. */
10460
10461 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10462 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10463 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10464 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10465 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10466 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10467 fputs ("\t.endr\n", asm_out_file);
10468 fputs ("\t.endif\n", asm_out_file);
10469 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10470
10471 /* Output extern declarations which should always be visible. */
10472 unicosmk_output_default_externs (asm_out_file);
10473
10474 /* Open a dummy section. We always need to be inside a section for the
10475 section-switching code to work correctly.
10476 ??? This should be a module id or something like that. I still have to
10477 figure out what the rules for those are. */
10478 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10479 }
10480
10481 /* Output text to appear at the end of an assembler file. This includes all
10482 pending extern declarations and DEX expressions. */
10483
10484 static void
10485 unicosmk_file_end (void)
10486 {
10487 fputs ("\t.endp\n\n", asm_out_file);
10488
10489 /* Output all pending externs. */
10490
10491 unicosmk_output_externs (asm_out_file);
10492
10493 /* Output dex definitions used for functions whose names conflict with
10494 register names. */
10495
10496 unicosmk_output_dex (asm_out_file);
10497
10498 fputs ("\t.end\t", asm_out_file);
10499 unicosmk_output_module_name (asm_out_file);
10500 putc ('\n', asm_out_file);
10501 }
10502
10503 #else
10504
10505 static void
10506 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10507 {}
10508
10509 static void
10510 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10511 {}
10512
10513 static void
10514 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10515 const char * fnname ATTRIBUTE_UNUSED)
10516 {}
10517
10518 rtx
10519 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10520 {
10521 return NULL_RTX;
10522 }
10523
10524 static int
10525 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10526 {
10527 return 0;
10528 }
10529
10530 #endif /* TARGET_ABI_UNICOSMK */
10531
10532 static void
10533 alpha_init_libfuncs (void)
10534 {
10535 if (TARGET_ABI_UNICOSMK)
10536 {
10537 /* Prevent gcc from generating calls to __divsi3. */
10538 set_optab_libfunc (sdiv_optab, SImode, 0);
10539 set_optab_libfunc (udiv_optab, SImode, 0);
10540
10541 /* Use the functions provided by the system library
10542 for DImode integer division. */
10543 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10544 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10545 }
10546 else if (TARGET_ABI_OPEN_VMS)
10547 {
10548 /* Use the VMS runtime library functions for division and
10549 remainder. */
10550 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10551 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10552 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10553 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10554 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10555 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10556 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10557 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10558 }
10559 }
10560
10561 \f
10562 /* Initialize the GCC target structure. */
10563 #if TARGET_ABI_OPEN_VMS
10564 # undef TARGET_ATTRIBUTE_TABLE
10565 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10566 # undef TARGET_SECTION_TYPE_FLAGS
10567 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10568 #endif
10569
10570 #undef TARGET_IN_SMALL_DATA_P
10571 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10572
10573 #if TARGET_ABI_UNICOSMK
10574 # undef TARGET_INSERT_ATTRIBUTES
10575 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10576 # undef TARGET_SECTION_TYPE_FLAGS
10577 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10578 # undef TARGET_ASM_UNIQUE_SECTION
10579 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10580 # undef TARGET_ASM_FUNCTION_RODATA_SECTION
10581 # define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10582 # undef TARGET_ASM_GLOBALIZE_LABEL
10583 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10584 # undef TARGET_MUST_PASS_IN_STACK
10585 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10586 #endif
10587
10588 #undef TARGET_ASM_ALIGNED_HI_OP
10589 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10590 #undef TARGET_ASM_ALIGNED_DI_OP
10591 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10592
10593 /* Default unaligned ops are provided for ELF systems. To get unaligned
10594 data for non-ELF systems, we have to turn off auto alignment. */
10595 #ifndef OBJECT_FORMAT_ELF
10596 #undef TARGET_ASM_UNALIGNED_HI_OP
10597 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10598 #undef TARGET_ASM_UNALIGNED_SI_OP
10599 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10600 #undef TARGET_ASM_UNALIGNED_DI_OP
10601 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10602 #endif
10603
10604 #ifdef OBJECT_FORMAT_ELF
10605 #undef TARGET_ASM_RELOC_RW_MASK
10606 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10607 #undef TARGET_ASM_SELECT_RTX_SECTION
10608 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10609 #undef TARGET_SECTION_TYPE_FLAGS
10610 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10611 #endif
10612
10613 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10614 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10615
10616 #undef TARGET_INIT_LIBFUNCS
10617 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10618
10619 #if TARGET_ABI_UNICOSMK
10620 #undef TARGET_ASM_FILE_START
10621 #define TARGET_ASM_FILE_START unicosmk_file_start
10622 #undef TARGET_ASM_FILE_END
10623 #define TARGET_ASM_FILE_END unicosmk_file_end
10624 #else
10625 #undef TARGET_ASM_FILE_START
10626 #define TARGET_ASM_FILE_START alpha_file_start
10627 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10628 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10629 #endif
10630
10631 #undef TARGET_SCHED_ADJUST_COST
10632 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10633 #undef TARGET_SCHED_ISSUE_RATE
10634 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10635 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10636 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10637 alpha_multipass_dfa_lookahead
10638
10639 #undef TARGET_HAVE_TLS
10640 #define TARGET_HAVE_TLS HAVE_AS_TLS
10641
10642 #undef TARGET_INIT_BUILTINS
10643 #define TARGET_INIT_BUILTINS alpha_init_builtins
10644 #undef TARGET_EXPAND_BUILTIN
10645 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10646 #undef TARGET_FOLD_BUILTIN
10647 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10648
10649 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10650 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10651 #undef TARGET_CANNOT_COPY_INSN_P
10652 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10653 #undef TARGET_CANNOT_FORCE_CONST_MEM
10654 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10655
10656 #if TARGET_ABI_OSF
10657 #undef TARGET_ASM_OUTPUT_MI_THUNK
10658 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10659 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10660 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10661 #undef TARGET_STDARG_OPTIMIZE_HOOK
10662 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10663 #endif
10664
10665 #undef TARGET_RTX_COSTS
10666 #define TARGET_RTX_COSTS alpha_rtx_costs
10667 #undef TARGET_ADDRESS_COST
10668 #define TARGET_ADDRESS_COST hook_int_rtx_0
10669
10670 #undef TARGET_MACHINE_DEPENDENT_REORG
10671 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10672
10673 #undef TARGET_PROMOTE_FUNCTION_ARGS
10674 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10675 #undef TARGET_PROMOTE_FUNCTION_RETURN
10676 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10677 #undef TARGET_PROMOTE_PROTOTYPES
10678 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10679 #undef TARGET_RETURN_IN_MEMORY
10680 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10681 #undef TARGET_PASS_BY_REFERENCE
10682 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10683 #undef TARGET_SETUP_INCOMING_VARARGS
10684 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10685 #undef TARGET_STRICT_ARGUMENT_NAMING
10686 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10687 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10688 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10689 #undef TARGET_SPLIT_COMPLEX_ARG
10690 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10691 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10692 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10693 #undef TARGET_ARG_PARTIAL_BYTES
10694 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10695
10696 #undef TARGET_SECONDARY_RELOAD
10697 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10698
10699 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10700 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10701 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10702 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10703
10704 #undef TARGET_BUILD_BUILTIN_VA_LIST
10705 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10706
10707 /* The Alpha architecture does not require sequential consistency. See
10708 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10709 for an example of how it can be violated in practice. */
10710 #undef TARGET_RELAXED_ORDERING
10711 #define TARGET_RELAXED_ORDERING true
10712
10713 #undef TARGET_DEFAULT_TARGET_FLAGS
10714 #define TARGET_DEFAULT_TARGET_FLAGS \
10715 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10716 #undef TARGET_HANDLE_OPTION
10717 #define TARGET_HANDLE_OPTION alpha_handle_option
10718
10719 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10720 #undef TARGET_MANGLE_TYPE
10721 #define TARGET_MANGLE_TYPE alpha_mangle_type
10722 #endif
10723
10724 struct gcc_target targetm = TARGET_INITIALIZER;
10725
10726 \f
10727 #include "gt-alpha.h"