1 /* Subroutines used for code generation on the DEC Alpha.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
12 any later version.
13
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "regs.h"
31 #include "hard-reg-set.h"
32 #include "real.h"
33 #include "insn-config.h"
34 #include "conditions.h"
35 #include "output.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "recog.h"
39 #include "expr.h"
40 #include "optabs.h"
41 #include "reload.h"
42 #include "obstack.h"
43 #include "except.h"
44 #include "function.h"
45 #include "toplev.h"
46 #include "ggc.h"
47 #include "integrate.h"
48 #include "tm_p.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "debug.h"
52 #include "langhooks.h"
53 #include <splay-tree.h>
54 #include "cfglayout.h"
55 #include "gimple.h"
56 #include "tree-flow.h"
57 #include "tree-stdarg.h"
58 #include "tm-constrs.h"
59 #include "df.h"
60
61 /* Specify which cpu to schedule for. */
62 enum processor_type alpha_tune;
63
64 /* Which cpu we're generating code for. */
65 enum processor_type alpha_cpu;
66
67 static const char * const alpha_cpu_name[] =
68 {
69 "ev4", "ev5", "ev6"
70 };
71
72 /* Specify how accurate floating-point traps need to be. */
73
74 enum alpha_trap_precision alpha_tp;
75
76 /* Specify the floating-point rounding mode. */
77
78 enum alpha_fp_rounding_mode alpha_fprm;
79
80 /* Specify which things cause traps. */
81
82 enum alpha_fp_trap_mode alpha_fptm;
83
84 /* Nonzero if inside a function, because the Alpha assembler can't
85 handle .file directives inside functions. */
86
87 static int inside_function = FALSE;
88
89 /* The number of cycles of latency we should assume on memory reads. */
90
91 int alpha_memory_latency = 3;
92
93 /* Whether the function needs the GP. */
94
95 static int alpha_function_needs_gp;
96
97 /* The alias set for prologue/epilogue register save/restore. */
98
99 static GTY(()) alias_set_type alpha_sr_alias_set;
100
101 /* The assembler name of the current function. */
102
103 static const char *alpha_fnname;
104
105 /* The next explicit relocation sequence number. */
106 extern GTY(()) int alpha_next_sequence_number;
107 int alpha_next_sequence_number = 1;
108
109 /* The literal and gpdisp sequence numbers for this insn, as printed
110 by %# and %* respectively. */
111 extern GTY(()) int alpha_this_literal_sequence_number;
112 extern GTY(()) int alpha_this_gpdisp_sequence_number;
113 int alpha_this_literal_sequence_number;
114 int alpha_this_gpdisp_sequence_number;
115
116 /* Costs of various operations on the different architectures. */
117
118 struct alpha_rtx_cost_data
119 {
120 unsigned char fp_add;
121 unsigned char fp_mult;
122 unsigned char fp_div_sf;
123 unsigned char fp_div_df;
124 unsigned char int_mult_si;
125 unsigned char int_mult_di;
126 unsigned char int_shift;
127 unsigned char int_cmov;
128 unsigned short int_div;
129 };
130
131 static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
132 {
133 { /* EV4 */
134 COSTS_N_INSNS (6), /* fp_add */
135 COSTS_N_INSNS (6), /* fp_mult */
136 COSTS_N_INSNS (34), /* fp_div_sf */
137 COSTS_N_INSNS (63), /* fp_div_df */
138 COSTS_N_INSNS (23), /* int_mult_si */
139 COSTS_N_INSNS (23), /* int_mult_di */
140 COSTS_N_INSNS (2), /* int_shift */
141 COSTS_N_INSNS (2), /* int_cmov */
142 COSTS_N_INSNS (97), /* int_div */
143 },
144 { /* EV5 */
145 COSTS_N_INSNS (4), /* fp_add */
146 COSTS_N_INSNS (4), /* fp_mult */
147 COSTS_N_INSNS (15), /* fp_div_sf */
148 COSTS_N_INSNS (22), /* fp_div_df */
149 COSTS_N_INSNS (8), /* int_mult_si */
150 COSTS_N_INSNS (12), /* int_mult_di */
151 COSTS_N_INSNS (1) + 1, /* int_shift */
152 COSTS_N_INSNS (1), /* int_cmov */
153 COSTS_N_INSNS (83), /* int_div */
154 },
155 { /* EV6 */
156 COSTS_N_INSNS (4), /* fp_add */
157 COSTS_N_INSNS (4), /* fp_mult */
158 COSTS_N_INSNS (12), /* fp_div_sf */
159 COSTS_N_INSNS (15), /* fp_div_df */
160 COSTS_N_INSNS (7), /* int_mult_si */
161 COSTS_N_INSNS (7), /* int_mult_di */
162 COSTS_N_INSNS (1), /* int_shift */
163 COSTS_N_INSNS (2), /* int_cmov */
164 COSTS_N_INSNS (86), /* int_div */
165 },
166 };
167
168 /* Similar but tuned for code size instead of execution latency. The
169 extra +N is fractional cost tuning based on latency. It's used to
170 encourage use of cheaper insns like shift, but only if there's just
171 one of them. */
172
173 static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
174 {
175 COSTS_N_INSNS (1), /* fp_add */
176 COSTS_N_INSNS (1), /* fp_mult */
177 COSTS_N_INSNS (1), /* fp_div_sf */
178 COSTS_N_INSNS (1) + 1, /* fp_div_df */
179 COSTS_N_INSNS (1) + 1, /* int_mult_si */
180 COSTS_N_INSNS (1) + 2, /* int_mult_di */
181 COSTS_N_INSNS (1), /* int_shift */
182 COSTS_N_INSNS (1), /* int_cmov */
183 COSTS_N_INSNS (6), /* int_div */
184 };
185
186 /* Get the number of args of a function in one of two ways. */
187 #if TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK
188 #define NUM_ARGS crtl->args.info.num_args
189 #else
190 #define NUM_ARGS crtl->args.info
191 #endif
192
193 #define REG_PV 27
194 #define REG_RA 26
195
196 /* Declarations of static functions. */
197 static struct machine_function *alpha_init_machine_status (void);
198 static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
199
200 #if TARGET_ABI_OPEN_VMS
201 static void alpha_write_linkage (FILE *, const char *, tree);
202 #endif
203
204 static void unicosmk_output_deferred_case_vectors (FILE *);
205 static void unicosmk_gen_dsib (unsigned long *);
206 static void unicosmk_output_ssib (FILE *, const char *);
207 static int unicosmk_need_dex (rtx);
208 \f
209 /* Implement TARGET_HANDLE_OPTION. */
210
211 static bool
212 alpha_handle_option (size_t code, const char *arg, int value)
213 {
214 switch (code)
215 {
216 case OPT_mfp_regs:
217 if (value == 0)
218 target_flags |= MASK_SOFT_FP;
219 break;
220
221 case OPT_mieee:
222 case OPT_mieee_with_inexact:
223 target_flags |= MASK_IEEE_CONFORMANT;
224 break;
225
226 case OPT_mtls_size_:
227 if (value != 16 && value != 32 && value != 64)
228 error ("bad value %qs for -mtls-size switch", arg);
229 break;
230 }
231
232 return true;
233 }
234
235 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
236 /* Implement TARGET_MANGLE_TYPE. */
237
238 static const char *
239 alpha_mangle_type (const_tree type)
240 {
241 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
242 && TARGET_LONG_DOUBLE_128)
243 return "g";
244
245 /* For all other types, use normal C++ mangling. */
246 return NULL;
247 }
248 #endif
249
250 /* Parse target option strings. */
251
252 void
253 override_options (void)
254 {
255 static const struct cpu_table {
256 const char *const name;
257 const enum processor_type processor;
258 const int flags;
259 } cpu_table[] = {
260 { "ev4", PROCESSOR_EV4, 0 },
261 { "ev45", PROCESSOR_EV4, 0 },
262 { "21064", PROCESSOR_EV4, 0 },
263 { "ev5", PROCESSOR_EV5, 0 },
264 { "21164", PROCESSOR_EV5, 0 },
265 { "ev56", PROCESSOR_EV5, MASK_BWX },
266 { "21164a", PROCESSOR_EV5, MASK_BWX },
267 { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX },
268 { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
269 { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX },
270 { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
271 { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX },
272 { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX },
273 { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX }
274 };
275
276 int const ct_size = ARRAY_SIZE (cpu_table);
277 int i;
278
279 /* Unicos/Mk doesn't have shared libraries. */
280 if (TARGET_ABI_UNICOSMK && flag_pic)
281 {
282 warning (0, "-f%s ignored for Unicos/Mk (not supported)",
283 (flag_pic > 1) ? "PIC" : "pic");
284 flag_pic = 0;
285 }
286
287 /* On Unicos/Mk, the native compiler consistently generates /d suffixes for
288 floating-point instructions. Make that the default for this target. */
289 if (TARGET_ABI_UNICOSMK)
290 alpha_fprm = ALPHA_FPRM_DYN;
291 else
292 alpha_fprm = ALPHA_FPRM_NORM;
293
294 alpha_tp = ALPHA_TP_PROG;
295 alpha_fptm = ALPHA_FPTM_N;
296
297 /* We cannot use su and sui qualifiers for conversion instructions on
298 Unicos/Mk. I'm not sure if this is due to assembler or hardware
299 limitations. Right now, we issue a warning if -mieee is specified
300 and then ignore it; eventually, we should either get it right or
301 disable the option altogether. */
302
303 if (TARGET_IEEE)
304 {
305 if (TARGET_ABI_UNICOSMK)
306 warning (0, "-mieee not supported on Unicos/Mk");
307 else
308 {
309 alpha_tp = ALPHA_TP_INSN;
310 alpha_fptm = ALPHA_FPTM_SU;
311 }
312 }
313
314 if (TARGET_IEEE_WITH_INEXACT)
315 {
316 if (TARGET_ABI_UNICOSMK)
317 warning (0, "-mieee-with-inexact not supported on Unicos/Mk");
318 else
319 {
320 alpha_tp = ALPHA_TP_INSN;
321 alpha_fptm = ALPHA_FPTM_SUI;
322 }
323 }
324
325 if (alpha_tp_string)
326 {
327 if (! strcmp (alpha_tp_string, "p"))
328 alpha_tp = ALPHA_TP_PROG;
329 else if (! strcmp (alpha_tp_string, "f"))
330 alpha_tp = ALPHA_TP_FUNC;
331 else if (! strcmp (alpha_tp_string, "i"))
332 alpha_tp = ALPHA_TP_INSN;
333 else
334 error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
335 }
336
337 if (alpha_fprm_string)
338 {
339 if (! strcmp (alpha_fprm_string, "n"))
340 alpha_fprm = ALPHA_FPRM_NORM;
341 else if (! strcmp (alpha_fprm_string, "m"))
342 alpha_fprm = ALPHA_FPRM_MINF;
343 else if (! strcmp (alpha_fprm_string, "c"))
344 alpha_fprm = ALPHA_FPRM_CHOP;
345 else if (! strcmp (alpha_fprm_string,"d"))
346 alpha_fprm = ALPHA_FPRM_DYN;
347 else
348 error ("bad value %qs for -mfp-rounding-mode switch",
349 alpha_fprm_string);
350 }
351
352 if (alpha_fptm_string)
353 {
354 if (strcmp (alpha_fptm_string, "n") == 0)
355 alpha_fptm = ALPHA_FPTM_N;
356 else if (strcmp (alpha_fptm_string, "u") == 0)
357 alpha_fptm = ALPHA_FPTM_U;
358 else if (strcmp (alpha_fptm_string, "su") == 0)
359 alpha_fptm = ALPHA_FPTM_SU;
360 else if (strcmp (alpha_fptm_string, "sui") == 0)
361 alpha_fptm = ALPHA_FPTM_SUI;
362 else
363 error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
364 }
365
366 if (alpha_cpu_string)
367 {
368 for (i = 0; i < ct_size; i++)
369 if (! strcmp (alpha_cpu_string, cpu_table [i].name))
370 {
371 alpha_tune = alpha_cpu = cpu_table [i].processor;
372 target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
373 target_flags |= cpu_table [i].flags;
374 break;
375 }
376 if (i == ct_size)
377 error ("bad value %qs for -mcpu switch", alpha_cpu_string);
378 }
379
380 if (alpha_tune_string)
381 {
382 for (i = 0; i < ct_size; i++)
383 if (! strcmp (alpha_tune_string, cpu_table [i].name))
384 {
385 alpha_tune = cpu_table [i].processor;
386 break;
387 }
388 if (i == ct_size)
389 error ("bad value %qs for -mcpu switch", alpha_tune_string);
390 }
391
392 /* Do some sanity checks on the above options. */
393
394 if (TARGET_ABI_UNICOSMK && alpha_fptm != ALPHA_FPTM_N)
395 {
396 warning (0, "trap mode not supported on Unicos/Mk");
397 alpha_fptm = ALPHA_FPTM_N;
398 }
399
400 if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
401 && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
402 {
403 warning (0, "fp software completion requires -mtrap-precision=i");
404 alpha_tp = ALPHA_TP_INSN;
405 }
406
407 if (alpha_cpu == PROCESSOR_EV6)
408 {
409 /* Except for EV6 pass 1 (not released), we always have precise
410 arithmetic traps. Which means we can do software completion
411 without minding trap shadows. */
412 alpha_tp = ALPHA_TP_PROG;
413 }
414
415 if (TARGET_FLOAT_VAX)
416 {
417 if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
418 {
419 warning (0, "rounding mode not supported for VAX floats");
420 alpha_fprm = ALPHA_FPRM_NORM;
421 }
422 if (alpha_fptm == ALPHA_FPTM_SUI)
423 {
424 warning (0, "trap mode not supported for VAX floats");
425 alpha_fptm = ALPHA_FPTM_SU;
426 }
427 if (target_flags_explicit & MASK_LONG_DOUBLE_128)
428 warning (0, "128-bit long double not supported for VAX floats");
429 target_flags &= ~MASK_LONG_DOUBLE_128;
430 }
431
432 {
433 char *end;
434 int lat;
435
436 if (!alpha_mlat_string)
437 alpha_mlat_string = "L1";
438
439 if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
440 && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
441 ;
442 else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
443 && ISDIGIT ((unsigned char)alpha_mlat_string[1])
444 && alpha_mlat_string[2] == '\0')
445 {
446 static int const cache_latency[][4] =
447 {
448 { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
449 { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
450 { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
451 };
452
453 lat = alpha_mlat_string[1] - '0';
454 if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
455 {
456 warning (0, "L%d cache latency unknown for %s",
457 lat, alpha_cpu_name[alpha_tune]);
458 lat = 3;
459 }
460 else
461 lat = cache_latency[alpha_tune][lat-1];
462 }
463 else if (! strcmp (alpha_mlat_string, "main"))
464 {
465 /* Most current memories have about 370ns latency. This is
466 a reasonable guess for a fast cpu. */
467 lat = 150;
468 }
469 else
470 {
471 warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
472 lat = 3;
473 }
474
475 alpha_memory_latency = lat;
476 }
477
478 /* Default the definition of "small data" to 8 bytes. */
479 if (!g_switch_set)
480 g_switch_value = 8;
481
482 /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
483 if (flag_pic == 1)
484 target_flags |= MASK_SMALL_DATA;
485 else if (flag_pic == 2)
486 target_flags &= ~MASK_SMALL_DATA;
487
488 /* Align labels and loops for optimal branching. */
489 /* ??? Kludge these by not doing anything if we don't optimize and also if
490 we are writing ECOFF symbols to work around a bug in DEC's assembler. */
491 if (optimize > 0 && write_symbols != SDB_DEBUG)
492 {
493 if (align_loops <= 0)
494 align_loops = 16;
495 if (align_jumps <= 0)
496 align_jumps = 16;
497 }
498 if (align_functions <= 0)
499 align_functions = 16;
500
501 /* Acquire a unique set number for our register saves and restores. */
502 alpha_sr_alias_set = new_alias_set ();
503
504 /* Register variables and functions with the garbage collector. */
505
506 /* Set up function hooks. */
507 init_machine_status = alpha_init_machine_status;
508
509 /* Tell the compiler when we're using VAX floating point. */
510 if (TARGET_FLOAT_VAX)
511 {
512 REAL_MODE_FORMAT (SFmode) = &vax_f_format;
513 REAL_MODE_FORMAT (DFmode) = &vax_g_format;
514 REAL_MODE_FORMAT (TFmode) = NULL;
515 }
516
517 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
518 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
519 target_flags |= MASK_LONG_DOUBLE_128;
520 #endif
521
522 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
523 can be optimized to ap = __builtin_next_arg (0). */
524 if (TARGET_ABI_UNICOSMK)
525 targetm.expand_builtin_va_start = NULL;
526 }
527 \f
528 /* Returns 1 if VALUE is a mask that contains full bytes of zeros or ones. */
529
530 int
531 zap_mask (HOST_WIDE_INT value)
532 {
533 int i;
534
535 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
536 i++, value >>= 8)
537 if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
538 return 0;
539
540 return 1;
541 }
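/* For illustration only (assuming a 64-bit HOST_WIDE_INT): the function
   above accepts exactly those values whose individual bytes are each
   0x00 or 0xff -- the masks a single zap/zapnot insn can produce from
   its 8-bit byte-select immediate.  A few sample values:

     zap_mask (0x00000000ffffffff)  == 1    low four bytes selected
     zap_mask (0xffff00000000ffff)  == 1    bytes 0, 1, 6 and 7 selected
     zap_mask (0x00000000000000f0)  == 0    0xf0 is only part of a byte
     zap_mask (0)                   == 1
     zap_mask (-1)                  == 1  */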
542
543 /* Return true if OP is valid for a particular TLS relocation.
544 We are already guaranteed that OP is a CONST. */
545
546 int
547 tls_symbolic_operand_1 (rtx op, int size, int unspec)
548 {
549 op = XEXP (op, 0);
550
551 if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
552 return 0;
553 op = XVECEXP (op, 0, 0);
554
555 if (GET_CODE (op) != SYMBOL_REF)
556 return 0;
557
558 switch (SYMBOL_REF_TLS_MODEL (op))
559 {
560 case TLS_MODEL_LOCAL_DYNAMIC:
561 return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
562 case TLS_MODEL_INITIAL_EXEC:
563 return unspec == UNSPEC_TPREL && size == 64;
564 case TLS_MODEL_LOCAL_EXEC:
565 return unspec == UNSPEC_TPREL && size == alpha_tls_size;
566 default:
567 gcc_unreachable ();
568 }
569 }
570
571 /* Used by aligned_memory_operand and unaligned_memory_operand to
572 resolve what reload is going to do with OP if it's a register. */
573
574 rtx
575 resolve_reload_operand (rtx op)
576 {
577 if (reload_in_progress)
578 {
579 rtx tmp = op;
580 if (GET_CODE (tmp) == SUBREG)
581 tmp = SUBREG_REG (tmp);
582 if (REG_P (tmp)
583 && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
584 {
585 op = reg_equiv_memory_loc[REGNO (tmp)];
586 if (op == 0)
587 return 0;
588 }
589 }
590 return op;
591 }
592
593 /* The set of scalar modes supported differs from the default
594 check-what-C-supports version in that sometimes TFmode is available
595 even when long double indicates only DFmode.  On Unicos/Mk, HImode
596 doesn't map to any C type, but of course we still support it. */
597
598 static bool
599 alpha_scalar_mode_supported_p (enum machine_mode mode)
600 {
601 switch (mode)
602 {
603 case QImode:
604 case HImode:
605 case SImode:
606 case DImode:
607 case TImode: /* via optabs.c */
608 return true;
609
610 case SFmode:
611 case DFmode:
612 return true;
613
614 case TFmode:
615 return TARGET_HAS_XFLOATING_LIBS;
616
617 default:
618 return false;
619 }
620 }
621
622 /* Alpha implements a couple of integer vector mode operations when
623 TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
624 which allows the vectorizer to operate on e.g. move instructions,
625 or when expand_vector_operations can do something useful. */
626
627 static bool
628 alpha_vector_mode_supported_p (enum machine_mode mode)
629 {
630 return mode == V8QImode || mode == V4HImode || mode == V2SImode;
631 }
632
633 /* Return 1 if this function can directly return via $26. */
634
635 int
636 direct_return (void)
637 {
638 return (! TARGET_ABI_OPEN_VMS && ! TARGET_ABI_UNICOSMK
639 && reload_completed
640 && alpha_sa_size () == 0
641 && get_frame_size () == 0
642 && crtl->outgoing_args_size == 0
643 && crtl->args.pretend_args_size == 0);
644 }
645
646 /* Return the ADDR_VEC associated with a tablejump insn. */
647
648 rtx
649 alpha_tablejump_addr_vec (rtx insn)
650 {
651 rtx tmp;
652
653 tmp = JUMP_LABEL (insn);
654 if (!tmp)
655 return NULL_RTX;
656 tmp = NEXT_INSN (tmp);
657 if (!tmp)
658 return NULL_RTX;
659 if (JUMP_P (tmp)
660 && GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC)
661 return PATTERN (tmp);
662 return NULL_RTX;
663 }
664
665 /* Return the label of the predicted edge, or CONST0_RTX if we don't know. */
666
667 rtx
668 alpha_tablejump_best_label (rtx insn)
669 {
670 rtx jump_table = alpha_tablejump_addr_vec (insn);
671 rtx best_label = NULL_RTX;
672
673 /* ??? Once the CFG doesn't keep getting completely rebuilt, look
674 there for edge frequency counts from profile data. */
675
676 if (jump_table)
677 {
678 int n_labels = XVECLEN (jump_table, 1);
679 int best_count = -1;
680 int i, j;
681
682 for (i = 0; i < n_labels; i++)
683 {
684 int count = 1;
685
686 for (j = i + 1; j < n_labels; j++)
687 if (XEXP (XVECEXP (jump_table, 1, i), 0)
688 == XEXP (XVECEXP (jump_table, 1, j), 0))
689 count++;
690
691 if (count > best_count)
692 best_count = count, best_label = XVECEXP (jump_table, 1, i);
693 }
694 }
695
696 return best_label ? best_label : const0_rtx;
697 }
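/* For illustration (hypothetical labels): if the ADDR_DIFF_VEC found by
   the function above contains the entries { L4, L7, L4, L9 }, then L4
   occurs twice while the others occur once, so L4 is returned as the
   best guess for the predicted edge; with no table at all we fall back
   to const0_rtx, meaning "no prediction".  */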
698
699 /* Return the TLS model to use for SYMBOL. */
700
701 static enum tls_model
702 tls_symbolic_operand_type (rtx symbol)
703 {
704 enum tls_model model;
705
706 if (GET_CODE (symbol) != SYMBOL_REF)
707 return TLS_MODEL_NONE;
708 model = SYMBOL_REF_TLS_MODEL (symbol);
709
710 /* Local-exec with a 64-bit size is the same code as initial-exec. */
711 if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
712 model = TLS_MODEL_INITIAL_EXEC;
713
714 return model;
715 }
716 \f
717 /* Return true if the function DECL will share the same GP as any
718 function in the current unit of translation. */
719
720 static bool
721 decl_has_samegp (const_tree decl)
722 {
723 /* Functions that are not local can be overridden, and thus may
724 not share the same gp. */
725 if (!(*targetm.binds_local_p) (decl))
726 return false;
727
728 /* If -msmall-data is in effect, assume that there is only one GP
729 for the module, and so any local symbol has this property. We
730 need explicit relocations to be able to enforce this for symbols
731 not defined in this unit of translation, however. */
732 if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
733 return true;
734
735 /* Functions that are not external are defined in this UoT. */
736 /* ??? Irritatingly, static functions not yet emitted are still
737 marked "external". Apply this to non-static functions only. */
738 return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
739 }
740
741 /* Return true if EXP should be placed in the small data section. */
742
743 static bool
744 alpha_in_small_data_p (const_tree exp)
745 {
746 /* We want to merge strings, so we never consider them small data. */
747 if (TREE_CODE (exp) == STRING_CST)
748 return false;
749
750 /* Functions are never in the small data area. Duh. */
751 if (TREE_CODE (exp) == FUNCTION_DECL)
752 return false;
753
754 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
755 {
756 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
757 if (strcmp (section, ".sdata") == 0
758 || strcmp (section, ".sbss") == 0)
759 return true;
760 }
761 else
762 {
763 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
764
765 /* If this is an incomplete type with size 0, then we can't put it
766 in sdata because it might be too big when completed. */
767 if (size > 0 && (unsigned HOST_WIDE_INT) size <= g_switch_value)
768 return true;
769 }
770
771 return false;
772 }
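/* Illustrative examples for the test above (hypothetical variables,
   default -G 8): "static int two_words[2];" is 8 bytes and so is placed
   in the small data area, while "static int four_words[4];" at 16 bytes
   is not; a variable given ".sdata" or ".sbss" explicitly via a section
   attribute is treated as small data regardless of its size.  */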
773
774 #if TARGET_ABI_OPEN_VMS
775 static bool
776 alpha_linkage_symbol_p (const char *symname)
777 {
778 int symlen = strlen (symname);
779
780 if (symlen > 4)
781 return strcmp (&symname [symlen - 4], "..lk") == 0;
782
783 return false;
784 }
785
786 #define LINKAGE_SYMBOL_REF_P(X) \
787 ((GET_CODE (X) == SYMBOL_REF \
788 && alpha_linkage_symbol_p (XSTR (X, 0))) \
789 || (GET_CODE (X) == CONST \
790 && GET_CODE (XEXP (X, 0)) == PLUS \
791 && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
792 && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
793 #endif
794
795 /* legitimate_address_p recognizes an RTL expression that is a valid
796 memory address for an instruction. The MODE argument is the
797 machine mode for the MEM expression that wants to use this address.
798
799 For Alpha, we have either a constant address or the sum of a
800 register and a constant address, or just a register. For DImode,
801 any of those forms can be surrounded with an AND that clears the
802 low-order three bits; this is an "unaligned" access. */
803
804 static bool
805 alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
806 {
807 /* If this is an ldq_u type address, discard the outer AND. */
808 if (mode == DImode
809 && GET_CODE (x) == AND
810 && CONST_INT_P (XEXP (x, 1))
811 && INTVAL (XEXP (x, 1)) == -8)
812 x = XEXP (x, 0);
813
814 /* Discard non-paradoxical subregs. */
815 if (GET_CODE (x) == SUBREG
816 && (GET_MODE_SIZE (GET_MODE (x))
817 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
818 x = SUBREG_REG (x);
819
820 /* Unadorned general registers are valid. */
821 if (REG_P (x)
822 && (strict
823 ? STRICT_REG_OK_FOR_BASE_P (x)
824 : NONSTRICT_REG_OK_FOR_BASE_P (x)))
825 return true;
826
827 /* Constant addresses (i.e. +/- 32k) are valid. */
828 if (CONSTANT_ADDRESS_P (x))
829 return true;
830
831 #if TARGET_ABI_OPEN_VMS
832 if (LINKAGE_SYMBOL_REF_P (x))
833 return true;
834 #endif
835
836 /* Register plus a small constant offset is valid. */
837 if (GET_CODE (x) == PLUS)
838 {
839 rtx ofs = XEXP (x, 1);
840 x = XEXP (x, 0);
841
842 /* Discard non-paradoxical subregs. */
843 if (GET_CODE (x) == SUBREG
844 && (GET_MODE_SIZE (GET_MODE (x))
845 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
846 x = SUBREG_REG (x);
847
848 if (REG_P (x))
849 {
850 if (! strict
851 && NONSTRICT_REG_OK_FP_BASE_P (x)
852 && CONST_INT_P (ofs))
853 return true;
854 if ((strict
855 ? STRICT_REG_OK_FOR_BASE_P (x)
856 : NONSTRICT_REG_OK_FOR_BASE_P (x))
857 && CONSTANT_ADDRESS_P (ofs))
858 return true;
859 }
860 }
861
862 /* If we're managing explicit relocations, LO_SUM is valid, as are small
863 data symbols. Avoid explicit relocations of modes larger than word
864 mode since e.g. $LC0+8($1) can fold around a +/- 32k offset. */
865 else if (TARGET_EXPLICIT_RELOCS
866 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
867 {
868 if (small_symbolic_operand (x, Pmode))
869 return true;
870
871 if (GET_CODE (x) == LO_SUM)
872 {
873 rtx ofs = XEXP (x, 1);
874 x = XEXP (x, 0);
875
876 /* Discard non-paradoxical subregs. */
877 if (GET_CODE (x) == SUBREG
878 && (GET_MODE_SIZE (GET_MODE (x))
879 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
880 x = SUBREG_REG (x);
881
882 /* Must have a valid base register. */
883 if (! (REG_P (x)
884 && (strict
885 ? STRICT_REG_OK_FOR_BASE_P (x)
886 : NONSTRICT_REG_OK_FOR_BASE_P (x))))
887 return false;
888
889 /* The symbol must be local. */
890 if (local_symbolic_operand (ofs, Pmode)
891 || dtp32_symbolic_operand (ofs, Pmode)
892 || tp32_symbolic_operand (ofs, Pmode))
893 return true;
894 }
895 }
896
897 return false;
898 }
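/* A few illustrative cases for the predicate above (register numbers
   are arbitrary), following the rules in its leading comment:

     (reg:DI 16)                                           valid
     (plus:DI (reg:DI 16) (const_int 64))                  valid, 16-bit disp
     (plus:DI (reg:DI 16) (const_int 65536))               invalid, disp too big
     (and:DI (plus:DI (reg:DI 16) (const_int 5))
             (const_int -8))                               valid, DImode only
     (plus:DI (reg:DI 16) (reg:DI 17))                     invalid, no reg+reg
                                                           addressing on Alpha  */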
899
900 /* Build the SYMBOL_REF for __tls_get_addr. */
901
902 static GTY(()) rtx tls_get_addr_libfunc;
903
904 static rtx
905 get_tls_get_addr (void)
906 {
907 if (!tls_get_addr_libfunc)
908 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
909 return tls_get_addr_libfunc;
910 }
911
912 /* Try machine-dependent ways of modifying an illegitimate address
913 to be legitimate. If we find one, return the new, valid address. */
914
915 static rtx
916 alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
917 {
918 HOST_WIDE_INT addend;
919
920 /* If the address is (plus reg const_int) and the CONST_INT is not a
921 valid offset, compute the high part of the constant and add it to
922 the register. Then our address is (plus temp low-part-const). */
923 if (GET_CODE (x) == PLUS
924 && REG_P (XEXP (x, 0))
925 && CONST_INT_P (XEXP (x, 1))
926 && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
927 {
928 addend = INTVAL (XEXP (x, 1));
929 x = XEXP (x, 0);
930 goto split_addend;
931 }
932
933 /* If the address is (const (plus FOO const_int)), find the low-order
934 part of the CONST_INT. Then load FOO plus any high-order part of the
935 CONST_INT into a register. Our address is (plus reg low-part-const).
936 This is done to reduce the number of GOT entries. */
937 if (can_create_pseudo_p ()
938 && GET_CODE (x) == CONST
939 && GET_CODE (XEXP (x, 0)) == PLUS
940 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
941 {
942 addend = INTVAL (XEXP (XEXP (x, 0), 1));
943 x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
944 goto split_addend;
945 }
946
947 /* If we have a (plus reg const), emit the load as in (2), then add
948 the two registers, and finally generate (plus reg low-part-const) as
949 our address. */
950 if (can_create_pseudo_p ()
951 && GET_CODE (x) == PLUS
952 && REG_P (XEXP (x, 0))
953 && GET_CODE (XEXP (x, 1)) == CONST
954 && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
955 && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
956 {
957 addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
958 x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
959 XEXP (XEXP (XEXP (x, 1), 0), 0),
960 NULL_RTX, 1, OPTAB_LIB_WIDEN);
961 goto split_addend;
962 }
963
964 /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
965 Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
966 around +/- 32k offset. */
967 if (TARGET_EXPLICIT_RELOCS
968 && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
969 && symbolic_operand (x, Pmode))
970 {
971 rtx r0, r16, eqv, tga, tp, insn, dest, seq;
972
973 switch (tls_symbolic_operand_type (x))
974 {
975 case TLS_MODEL_NONE:
976 break;
977
978 case TLS_MODEL_GLOBAL_DYNAMIC:
979 start_sequence ();
980
981 r0 = gen_rtx_REG (Pmode, 0);
982 r16 = gen_rtx_REG (Pmode, 16);
983 tga = get_tls_get_addr ();
984 dest = gen_reg_rtx (Pmode);
985 seq = GEN_INT (alpha_next_sequence_number++);
986
987 emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
988 insn = gen_call_value_osf_tlsgd (r0, tga, seq);
989 insn = emit_call_insn (insn);
990 RTL_CONST_CALL_P (insn) = 1;
991 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
992
993 insn = get_insns ();
994 end_sequence ();
995
996 emit_libcall_block (insn, dest, r0, x);
997 return dest;
998
999 case TLS_MODEL_LOCAL_DYNAMIC:
1000 start_sequence ();
1001
1002 r0 = gen_rtx_REG (Pmode, 0);
1003 r16 = gen_rtx_REG (Pmode, 16);
1004 tga = get_tls_get_addr ();
1005 scratch = gen_reg_rtx (Pmode);
1006 seq = GEN_INT (alpha_next_sequence_number++);
1007
1008 emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
1009 insn = gen_call_value_osf_tlsldm (r0, tga, seq);
1010 insn = emit_call_insn (insn);
1011 RTL_CONST_CALL_P (insn) = 1;
1012 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
1013
1014 insn = get_insns ();
1015 end_sequence ();
1016
1017 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1018 UNSPEC_TLSLDM_CALL);
1019 emit_libcall_block (insn, scratch, r0, eqv);
1020
1021 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
1022 eqv = gen_rtx_CONST (Pmode, eqv);
1023
1024 if (alpha_tls_size == 64)
1025 {
1026 dest = gen_reg_rtx (Pmode);
1027 emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
1028 emit_insn (gen_adddi3 (dest, dest, scratch));
1029 return dest;
1030 }
1031 if (alpha_tls_size == 32)
1032 {
1033 insn = gen_rtx_HIGH (Pmode, eqv);
1034 insn = gen_rtx_PLUS (Pmode, scratch, insn);
1035 scratch = gen_reg_rtx (Pmode);
1036 emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
1037 }
1038 return gen_rtx_LO_SUM (Pmode, scratch, eqv);
1039
1040 case TLS_MODEL_INITIAL_EXEC:
1041 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1042 eqv = gen_rtx_CONST (Pmode, eqv);
1043 tp = gen_reg_rtx (Pmode);
1044 scratch = gen_reg_rtx (Pmode);
1045 dest = gen_reg_rtx (Pmode);
1046
1047 emit_insn (gen_load_tp (tp));
1048 emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
1049 emit_insn (gen_adddi3 (dest, tp, scratch));
1050 return dest;
1051
1052 case TLS_MODEL_LOCAL_EXEC:
1053 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
1054 eqv = gen_rtx_CONST (Pmode, eqv);
1055 tp = gen_reg_rtx (Pmode);
1056
1057 emit_insn (gen_load_tp (tp));
1058 if (alpha_tls_size == 32)
1059 {
1060 insn = gen_rtx_HIGH (Pmode, eqv);
1061 insn = gen_rtx_PLUS (Pmode, tp, insn);
1062 tp = gen_reg_rtx (Pmode);
1063 emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
1064 }
1065 return gen_rtx_LO_SUM (Pmode, tp, eqv);
1066
1067 default:
1068 gcc_unreachable ();
1069 }
1070
1071 if (local_symbolic_operand (x, Pmode))
1072 {
1073 if (small_symbolic_operand (x, Pmode))
1074 return x;
1075 else
1076 {
1077 if (can_create_pseudo_p ())
1078 scratch = gen_reg_rtx (Pmode);
1079 emit_insn (gen_rtx_SET (VOIDmode, scratch,
1080 gen_rtx_HIGH (Pmode, x)));
1081 return gen_rtx_LO_SUM (Pmode, scratch, x);
1082 }
1083 }
1084 }
1085
1086 return NULL;
1087
1088 split_addend:
1089 {
1090 HOST_WIDE_INT low, high;
1091
1092 low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
1093 addend -= low;
1094 high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
1095 addend -= high;
1096
1097 if (addend)
1098 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
1099 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1100 1, OPTAB_LIB_WIDEN);
1101 if (high)
1102 x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
1103 (!can_create_pseudo_p () ? scratch : NULL_RTX),
1104 1, OPTAB_LIB_WIDEN);
1105
1106 return plus_constant (x, low);
1107 }
1108 }
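/* A worked example of the split_addend arithmetic above (the numbers
   are made up for illustration).  ((A & 0xffff) ^ 0x8000) - 0x8000 is
   just a branch-free way of sign-extending the low 16 bits of A.  With
   addend = 0x18004:

     low    = ((0x8004 ^ 0x8000) - 0x8000)       = -0x7ffc
     addend = 0x18004 - (-0x7ffc)                =  0x20000
     high   = sign-extended low 32 bits of that  =  0x20000, leftover 0

   so the high part is added to the base register with a single ldah of
   2 (2 << 16 == 0x20000), and the returned address carries the in-range
   displacement -0x7ffc; 0x20000 - 0x7ffc == 0x18004 as required.  */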
1109
1110
1111 /* Try machine-dependent ways of modifying an illegitimate address
1112 to be legitimate. Return X or the new, valid address. */
1113
1114 static rtx
1115 alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1116 enum machine_mode mode)
1117 {
1118 rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
1119 return new_x ? new_x : x;
1120 }
1121
1122 /* Primarily this is required for TLS symbols, but given that our move
1123 patterns *ought* to be able to handle any symbol at any time, we
1124 should never be spilling symbolic operands to the constant pool, ever. */
1125
1126 static bool
1127 alpha_cannot_force_const_mem (rtx x)
1128 {
1129 enum rtx_code code = GET_CODE (x);
1130 return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
1131 }
1132
1133 /* We do not allow indirect calls to be optimized into sibling calls, nor
1134 can we allow a call to a function with a different GP to be optimized
1135 into a sibcall. */
1136
1137 static bool
1138 alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
1139 {
1140 /* Can't do indirect tail calls, since we don't know if the target
1141 uses the same GP. */
1142 if (!decl)
1143 return false;
1144
1145 /* Otherwise, we can make a tail call if the target function shares
1146 the same GP. */
1147 return decl_has_samegp (decl);
1148 }
1149
1150 int
1151 some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
1152 {
1153 rtx x = *px;
1154
1155 /* Don't re-split. */
1156 if (GET_CODE (x) == LO_SUM)
1157 return -1;
1158
1159 return small_symbolic_operand (x, Pmode) != 0;
1160 }
1161
1162 static int
1163 split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
1164 {
1165 rtx x = *px;
1166
1167 /* Don't re-split. */
1168 if (GET_CODE (x) == LO_SUM)
1169 return -1;
1170
1171 if (small_symbolic_operand (x, Pmode))
1172 {
1173 x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
1174 *px = x;
1175 return -1;
1176 }
1177
1178 return 0;
1179 }
1180
1181 rtx
1182 split_small_symbolic_operand (rtx x)
1183 {
1184 x = copy_insn (x);
1185 for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
1186 return x;
1187 }
1188
1189 /* Indicate that INSN cannot be duplicated. This is true for any insn
1190 that we've marked with gpdisp relocs, since those have to stay in
1191 1-1 correspondence with one another.
1192
1193 Technically we could copy them if we could set up a mapping from one
1194 sequence number to another, across the set of insns to be duplicated.
1195 This seems overly complicated and error-prone since interblock motion
1196 from sched-ebb could move one of the pair of insns to a different block.
1197
1198 Also cannot allow jsr insns to be duplicated. If they throw exceptions,
1199 then they'll be in a different block from their ldgp. Which could lead
1200 the bb reorder code to think that it would be ok to copy just the block
1201 containing the call and branch to the block containing the ldgp. */
1202
1203 static bool
1204 alpha_cannot_copy_insn_p (rtx insn)
1205 {
1206 if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
1207 return false;
1208 if (recog_memoized (insn) >= 0)
1209 return get_attr_cannot_copy (insn);
1210 else
1211 return false;
1212 }
1213
1214
1215 /* Try a machine-dependent way of reloading an illegitimate address
1216 operand. If we find one, push the reload and return the new rtx. */
1217
1218 rtx
1219 alpha_legitimize_reload_address (rtx x,
1220 enum machine_mode mode ATTRIBUTE_UNUSED,
1221 int opnum, int type,
1222 int ind_levels ATTRIBUTE_UNUSED)
1223 {
1224 /* We must recognize output that we have already generated ourselves. */
1225 if (GET_CODE (x) == PLUS
1226 && GET_CODE (XEXP (x, 0)) == PLUS
1227 && REG_P (XEXP (XEXP (x, 0), 0))
1228 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
1229 && CONST_INT_P (XEXP (x, 1)))
1230 {
1231 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1232 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1233 opnum, (enum reload_type) type);
1234 return x;
1235 }
1236
1237 /* We wish to handle large displacements off a base register by
1238 splitting the addend across an ldah and the mem insn. This
1239 cuts the number of extra insns needed from 3 to 1. */
1240 if (GET_CODE (x) == PLUS
1241 && REG_P (XEXP (x, 0))
1242 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
1243 && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
1244 && GET_CODE (XEXP (x, 1)) == CONST_INT)
1245 {
1246 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
1247 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
1248 HOST_WIDE_INT high
1249 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
1250
1251 /* Check for 32-bit overflow. */
1252 if (high + low != val)
1253 return NULL_RTX;
1254
1255 /* Reload the high part into a base reg; leave the low part
1256 in the mem directly. */
1257 x = gen_rtx_PLUS (GET_MODE (x),
1258 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
1259 GEN_INT (high)),
1260 GEN_INT (low));
1261
1262 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
1263 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
1264 opnum, (enum reload_type) type);
1265 return x;
1266 }
1267
1268 return NULL_RTX;
1269 }
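/* A worked illustration of the 32-bit overflow check above (made-up
   values).  For val = 0x12348000 the split succeeds:

     low = -0x8000,  high = 0x12350000,  high + low == val

   so the ldah materializes 0x12350000 and the mem keeps the -0x8000
   displacement.  For val = 0x7fffa000 it fails:

     low = -0x6000,  val - low = 0x80000000,
     high = ((0x80000000 ^ 0x80000000) - 0x80000000) = -0x80000000,
     high + low != val

   because +0x80000000 is not reachable as a sign-extended 32-bit ldah
   result, so we return NULL_RTX and let reload cope by other means.  */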
1270 \f
1271 /* Compute a (partial) cost for rtx X. Return true if the complete
1272 cost has been computed, and false if subexpressions should be
1273 scanned. In either case, *TOTAL contains the cost result. */
1274
1275 static bool
1276 alpha_rtx_costs (rtx x, int code, int outer_code, int *total,
1277 bool speed)
1278 {
1279 enum machine_mode mode = GET_MODE (x);
1280 bool float_mode_p = FLOAT_MODE_P (mode);
1281 const struct alpha_rtx_cost_data *cost_data;
1282
1283 if (!speed)
1284 cost_data = &alpha_rtx_cost_size;
1285 else
1286 cost_data = &alpha_rtx_cost_data[alpha_tune];
1287
1288 switch (code)
1289 {
1290 case CONST_INT:
1291 /* If this is an 8-bit constant, return zero since it can be used
1292 nearly anywhere with no cost. If it is a valid operand for an
1293 ADD or AND, likewise return 0 if we know it will be used in that
1294 context. Otherwise, return 2 since it might be used there later.
1295 All other constants take at least two insns. */
1296 if (INTVAL (x) >= 0 && INTVAL (x) < 256)
1297 {
1298 *total = 0;
1299 return true;
1300 }
1301 /* FALLTHRU */
1302
1303 case CONST_DOUBLE:
1304 if (x == CONST0_RTX (mode))
1305 *total = 0;
1306 else if ((outer_code == PLUS && add_operand (x, VOIDmode))
1307 || (outer_code == AND && and_operand (x, VOIDmode)))
1308 *total = 0;
1309 else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
1310 *total = 2;
1311 else
1312 *total = COSTS_N_INSNS (2);
1313 return true;
1314
1315 case CONST:
1316 case SYMBOL_REF:
1317 case LABEL_REF:
1318 if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
1319 *total = COSTS_N_INSNS (outer_code != MEM);
1320 else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
1321 *total = COSTS_N_INSNS (1 + (outer_code != MEM));
1322 else if (tls_symbolic_operand_type (x))
1323 /* Estimate of cost for call_pal rduniq. */
1324 /* ??? How many insns do we emit here? More than one... */
1325 *total = COSTS_N_INSNS (15);
1326 else
1327 /* Otherwise we do a load from the GOT. */
1328 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1329 return true;
1330
1331 case HIGH:
1332 /* This is effectively an add_operand. */
1333 *total = 2;
1334 return true;
1335
1336 case PLUS:
1337 case MINUS:
1338 if (float_mode_p)
1339 *total = cost_data->fp_add;
1340 else if (GET_CODE (XEXP (x, 0)) == MULT
1341 && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
1342 {
1343 *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
1344 (enum rtx_code) outer_code, speed)
1345 + rtx_cost (XEXP (x, 1),
1346 (enum rtx_code) outer_code, speed)
1347 + COSTS_N_INSNS (1));
1348 return true;
1349 }
1350 return false;
1351
1352 case MULT:
1353 if (float_mode_p)
1354 *total = cost_data->fp_mult;
1355 else if (mode == DImode)
1356 *total = cost_data->int_mult_di;
1357 else
1358 *total = cost_data->int_mult_si;
1359 return false;
1360
1361 case ASHIFT:
1362 if (CONST_INT_P (XEXP (x, 1))
1363 && INTVAL (XEXP (x, 1)) <= 3)
1364 {
1365 *total = COSTS_N_INSNS (1);
1366 return false;
1367 }
1368 /* FALLTHRU */
1369
1370 case ASHIFTRT:
1371 case LSHIFTRT:
1372 *total = cost_data->int_shift;
1373 return false;
1374
1375 case IF_THEN_ELSE:
1376 if (float_mode_p)
1377 *total = cost_data->fp_add;
1378 else
1379 *total = cost_data->int_cmov;
1380 return false;
1381
1382 case DIV:
1383 case UDIV:
1384 case MOD:
1385 case UMOD:
1386 if (!float_mode_p)
1387 *total = cost_data->int_div;
1388 else if (mode == SFmode)
1389 *total = cost_data->fp_div_sf;
1390 else
1391 *total = cost_data->fp_div_df;
1392 return false;
1393
1394 case MEM:
1395 *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
1396 return true;
1397
1398 case NEG:
1399 if (! float_mode_p)
1400 {
1401 *total = COSTS_N_INSNS (1);
1402 return false;
1403 }
1404 /* FALLTHRU */
1405
1406 case ABS:
1407 if (! float_mode_p)
1408 {
1409 *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
1410 return false;
1411 }
1412 /* FALLTHRU */
1413
1414 case FLOAT:
1415 case UNSIGNED_FLOAT:
1416 case FIX:
1417 case UNSIGNED_FIX:
1418 case FLOAT_TRUNCATE:
1419 *total = cost_data->fp_add;
1420 return false;
1421
1422 case FLOAT_EXTEND:
1423 if (MEM_P (XEXP (x, 0)))
1424 *total = 0;
1425 else
1426 *total = cost_data->fp_add;
1427 return false;
1428
1429 default:
1430 return false;
1431 }
1432 }
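/* For illustration of the PLUS/MINUS case above: an address-like
   expression such as

     (plus:DI (mult:DI (reg:DI 1) (const_int 8)) (reg:DI 2))

   is costed as one insn plus the costs of its non-constant operands,
   because const48_operand accepts the multipliers 4 and 8 and the whole
   expression maps onto a single s4addq/s8addq instruction.  */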
1433 \f
1434 /* REF is an alignable memory location. Place an aligned SImode
1435 reference into *PALIGNED_MEM and the number of bits to shift into
1436 *PBITNUM. SCRATCH is a free register for use in reloading out
1437 of range stack slots. */
1438
1439 void
1440 get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
1441 {
1442 rtx base;
1443 HOST_WIDE_INT disp, offset;
1444
1445 gcc_assert (MEM_P (ref));
1446
1447 if (reload_in_progress
1448 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1449 {
1450 base = find_replacement (&XEXP (ref, 0));
1451 gcc_assert (memory_address_p (GET_MODE (ref), base));
1452 }
1453 else
1454 base = XEXP (ref, 0);
1455
1456 if (GET_CODE (base) == PLUS)
1457 disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1458 else
1459 disp = 0;
1460
1461 /* Find the byte offset within an aligned word. If the memory itself is
1462 claimed to be aligned, believe it. Otherwise, aligned_memory_operand
1463 will have examined the base register and determined it is aligned, and
1464 thus displacements from it are naturally alignable. */
1465 if (MEM_ALIGN (ref) >= 32)
1466 offset = 0;
1467 else
1468 offset = disp & 3;
1469
1470 /* Access the entire aligned word. */
1471 *paligned_mem = widen_memory_access (ref, SImode, -offset);
1472
1473 /* Convert the byte offset within the word to a bit offset. */
1474 if (WORDS_BIG_ENDIAN)
1475 offset = 32 - (GET_MODE_BITSIZE (GET_MODE (ref)) + offset * 8);
1476 else
1477 offset *= 8;
1478 *pbitnum = GEN_INT (offset);
1479 }
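/* A worked example for the function above (little-endian, made-up
   displacement): for a QImode reference at (plus (reg) (const_int 7))
   with no alignment information on the MEM itself, disp = 7 gives
   offset = disp & 3 = 3, so *PALIGNED_MEM becomes the SImode word at
   displacement 4 (the access is widened by -3 bytes) and *PBITNUM
   becomes 3 * 8 = 24, the bit position of the wanted byte within that
   word.  */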
1480
1481 /* Similar to get_aligned_mem, but just get the address.  Handle the
1482 two reload cases. */
1483
1484 rtx
1485 get_unaligned_address (rtx ref)
1486 {
1487 rtx base;
1488 HOST_WIDE_INT offset = 0;
1489
1490 gcc_assert (MEM_P (ref));
1491
1492 if (reload_in_progress
1493 && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
1494 {
1495 base = find_replacement (&XEXP (ref, 0));
1496
1497 gcc_assert (memory_address_p (GET_MODE (ref), base));
1498 }
1499 else
1500 base = XEXP (ref, 0);
1501
1502 if (GET_CODE (base) == PLUS)
1503 offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
1504
1505 return plus_constant (base, offset);
1506 }
1507
1508 /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
1509 X is always returned in a register. */
1510
1511 rtx
1512 get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
1513 {
1514 if (GET_CODE (addr) == PLUS)
1515 {
1516 ofs += INTVAL (XEXP (addr, 1));
1517 addr = XEXP (addr, 0);
1518 }
1519
1520 return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
1521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1522 }
1523
1524 /* On the Alpha, all (non-symbolic) constants except zero go into
1525 a floating-point register via memory. Note that we cannot
1526 return anything that is not a subset of RCLASS, and that some
1527 symbolic constants cannot be dropped to memory. */
1528
1529 enum reg_class
1530 alpha_preferred_reload_class(rtx x, enum reg_class rclass)
1531 {
1532 /* Zero is present in any register class. */
1533 if (x == CONST0_RTX (GET_MODE (x)))
1534 return rclass;
1535
1536 /* These sorts of constants we can easily drop to memory. */
1537 if (CONST_INT_P (x)
1538 || GET_CODE (x) == CONST_DOUBLE
1539 || GET_CODE (x) == CONST_VECTOR)
1540 {
1541 if (rclass == FLOAT_REGS)
1542 return NO_REGS;
1543 if (rclass == ALL_REGS)
1544 return GENERAL_REGS;
1545 return rclass;
1546 }
1547
1548 /* All other kinds of constants should not (and in the case of HIGH
1549 cannot) be dropped to memory -- instead we use a GENERAL_REGS
1550 secondary reload. */
1551 if (CONSTANT_P (x))
1552 return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
1553
1554 return rclass;
1555 }
1556
1557 /* Inform reload about cases where moving X with a mode MODE to a register in
1558 RCLASS requires an extra scratch or immediate register. Return the class
1559 needed for the immediate register. */
1560
1561 static enum reg_class
1562 alpha_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
1563 enum machine_mode mode, secondary_reload_info *sri)
1564 {
1565 /* Loading and storing HImode or QImode values to and from memory
1566 usually requires a scratch register. */
1567 if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
1568 {
1569 if (any_memory_operand (x, mode))
1570 {
1571 if (in_p)
1572 {
1573 if (!aligned_memory_operand (x, mode))
1574 sri->icode = reload_in_optab[mode];
1575 }
1576 else
1577 sri->icode = reload_out_optab[mode];
1578 return NO_REGS;
1579 }
1580 }
1581
1582 /* We also cannot do integral arithmetic into FP regs, as might result
1583 from register elimination into a DImode fp register. */
1584 if (rclass == FLOAT_REGS)
1585 {
1586 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
1587 return GENERAL_REGS;
1588 if (in_p && INTEGRAL_MODE_P (mode)
1589 && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
1590 return GENERAL_REGS;
1591 }
1592
1593 return NO_REGS;
1594 }
1595 \f
1596 /* Subfunction of the following function. Update the flags of any MEM
1597 found in part of X. */
1598
1599 static int
1600 alpha_set_memflags_1 (rtx *xp, void *data)
1601 {
1602 rtx x = *xp, orig = (rtx) data;
1603
1604 if (!MEM_P (x))
1605 return 0;
1606
1607 MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
1608 MEM_IN_STRUCT_P (x) = MEM_IN_STRUCT_P (orig);
1609 MEM_SCALAR_P (x) = MEM_SCALAR_P (orig);
1610 MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
1611 MEM_READONLY_P (x) = MEM_READONLY_P (orig);
1612
1613 /* Sadly, we cannot use alias sets because the extra aliasing
1614 produced by the AND interferes. Given that two-byte quantities
1615 are the only thing we would be able to differentiate anyway,
1616 there does not seem to be any point in convoluting the early
1617 out of the alias check. */
1618
1619 return -1;
1620 }
1621
1622 /* Given SEQ, which is an INSN list, look for any MEMs in either
1623 a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and
1624 volatile flags from REF into each of the MEMs found. If REF is not
1625 a MEM, don't do anything. */
1626
1627 void
1628 alpha_set_memflags (rtx seq, rtx ref)
1629 {
1630 rtx insn;
1631
1632 if (!MEM_P (ref))
1633 return;
1634
1635 /* This is only called from alpha.md, after having had something
1636 generated from one of the insn patterns. So if everything is
1637 zero, the pattern is already up-to-date. */
1638 if (!MEM_VOLATILE_P (ref)
1639 && !MEM_IN_STRUCT_P (ref)
1640 && !MEM_SCALAR_P (ref)
1641 && !MEM_NOTRAP_P (ref)
1642 && !MEM_READONLY_P (ref))
1643 return;
1644
1645 for (insn = seq; insn; insn = NEXT_INSN (insn))
1646 if (INSN_P (insn))
1647 for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
1648 else
1649 gcc_unreachable ();
1650 }
1651 \f
1652 static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
1653 int, bool);
1654
1655 /* Internal routine for alpha_emit_set_const to check for N or fewer insns.
1656 If NO_OUTPUT is true, then we only check to see if N insns are possible,
1657 and return pc_rtx if successful. */
1658
1659 static rtx
1660 alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
1661 HOST_WIDE_INT c, int n, bool no_output)
1662 {
1663 HOST_WIDE_INT new_const;
1664 int i, bits;
1665 /* Use a pseudo if highly optimizing and still generating RTL. */
1666 rtx subtarget
1667 = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
1668 rtx temp, insn;
1669
1670 /* If this is a sign-extended 32-bit constant, we can do this in at most
1671 three insns, so do it if we have enough insns left. We always have
1672 a sign-extended 32-bit constant when compiling on a narrow machine. */
1673
1674 if (HOST_BITS_PER_WIDE_INT != 64
1675 || c >> 31 == -1 || c >> 31 == 0)
1676 {
1677 HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
1678 HOST_WIDE_INT tmp1 = c - low;
1679 HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
1680 HOST_WIDE_INT extra = 0;
1681
1682 /* If HIGH will be interpreted as negative but the constant is
1683 positive, we must adjust it to do two ldah insns. */
1684
1685 if ((high & 0x8000) != 0 && c >= 0)
1686 {
1687 extra = 0x4000;
1688 tmp1 -= 0x40000000;
1689 high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
1690 }
1691
1692 if (c == low || (low == 0 && extra == 0))
1693 {
1694 /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
1695 but that meant that we can't handle INT_MIN on 32-bit machines
1696 (like NT/Alpha), because we recurse indefinitely through
1697 emit_move_insn to gen_movdi. So instead, since we know exactly
1698 what we want, create it explicitly. */
1699
1700 if (no_output)
1701 return pc_rtx;
1702 if (target == NULL)
1703 target = gen_reg_rtx (mode);
1704 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
1705 return target;
1706 }
1707 else if (n >= 2 + (extra != 0))
1708 {
1709 if (no_output)
1710 return pc_rtx;
1711 if (!can_create_pseudo_p ())
1712 {
1713 emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
1714 temp = target;
1715 }
1716 else
1717 temp = copy_to_suggested_reg (GEN_INT (high << 16),
1718 subtarget, mode);
1719
1720 /* As of 2002-02-23, addsi3 is only available when not optimizing.
1721 This means that if we go through expand_binop, we'll try to
1722 generate extensions, etc, which will require new pseudos, which
1723 will fail during some split phases. The SImode add patterns
1724 still exist, but are not named. So build the insns by hand. */
1725
1726 if (extra != 0)
1727 {
1728 if (! subtarget)
1729 subtarget = gen_reg_rtx (mode);
1730 insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
1731 insn = gen_rtx_SET (VOIDmode, subtarget, insn);
1732 emit_insn (insn);
1733 temp = subtarget;
1734 }
1735
1736 if (target == NULL)
1737 target = gen_reg_rtx (mode);
1738 insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1739 insn = gen_rtx_SET (VOIDmode, target, insn);
1740 emit_insn (insn);
1741 return target;
1742 }
1743 }
1744
1745 /* If we couldn't do it that way, try some other methods. But if we have
1746 no instructions left, don't bother. Likewise, if this is SImode and
1747 we can't make pseudos, we can't do anything since the expand_binop
1748 and expand_unop calls will widen and try to make pseudos. */
1749
1750 if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
1751 return 0;
1752
1753 /* Next, see if we can load a related constant and then shift and possibly
1754 negate it to get the constant we want.  Try this once for each
1755 increasing number of insns. */
1756
1757 for (i = 1; i < n; i++)
1758 {
1759 /* First, see if, after subtracting some low bits, we have an easy
1760 load of the high bits. */
1761
1762 new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
1763 if (new_const != 0)
1764 {
1765 temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
1766 if (temp)
1767 {
1768 if (no_output)
1769 return temp;
1770 return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
1771 target, 0, OPTAB_WIDEN);
1772 }
1773 }
1774
1775 /* Next try complementing. */
1776 temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
1777 if (temp)
1778 {
1779 if (no_output)
1780 return temp;
1781 return expand_unop (mode, one_cmpl_optab, temp, target, 0);
1782 }
1783
1784 /* Next try to form a constant and do a left shift. We can do this
1785 if some low-order bits are zero; the exact_log2 call below tells
1786 us that information. The bits we are shifting out could be any
1787 value, but here we'll just try the 0- and sign-extended forms of
1788 the constant. To try to increase the chance of having the same
1789 constant in more than one insn, start at the highest number of
1790 bits to shift, but try all possibilities in case a ZAPNOT will
1791 be useful. */
1792
1793 bits = exact_log2 (c & -c);
1794 if (bits > 0)
1795 for (; bits > 0; bits--)
1796 {
1797 new_const = c >> bits;
1798 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1799 if (!temp && c < 0)
1800 {
1801 new_const = (unsigned HOST_WIDE_INT)c >> bits;
1802 temp = alpha_emit_set_const (subtarget, mode, new_const,
1803 i, no_output);
1804 }
1805 if (temp)
1806 {
1807 if (no_output)
1808 return temp;
1809 return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
1810 target, 0, OPTAB_WIDEN);
1811 }
1812 }
1813
1814 /* Now try high-order zero bits. Here we try the shifted-in bits as
1815 all zero and all ones. Be careful to avoid shifting outside the
1816 mode and to avoid shifting outside the host wide int size. */
1817 /* On narrow hosts, don't shift a 1 into the high bit, since we'll
1818 confuse the recursive call and set all of the high 32 bits. */
1819
1820 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1821 - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
1822 if (bits > 0)
1823 for (; bits > 0; bits--)
1824 {
1825 new_const = c << bits;
1826 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1827 if (!temp)
1828 {
1829 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1830 temp = alpha_emit_set_const (subtarget, mode, new_const,
1831 i, no_output);
1832 }
1833 if (temp)
1834 {
1835 if (no_output)
1836 return temp;
1837 return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
1838 target, 1, OPTAB_WIDEN);
1839 }
1840 }
1841
1842 /* Now try high-order 1 bits. We get that with a sign-extension.
1843 But one bit isn't enough here. Be careful to avoid shifting outside
1844 the mode and to avoid shifting outside the host wide int size. */
1845
1846 bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
1847 - floor_log2 (~ c) - 2);
1848 if (bits > 0)
1849 for (; bits > 0; bits--)
1850 {
1851 new_const = c << bits;
1852 temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
1853 if (!temp)
1854 {
1855 new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
1856 temp = alpha_emit_set_const (subtarget, mode, new_const,
1857 i, no_output);
1858 }
1859 if (temp)
1860 {
1861 if (no_output)
1862 return temp;
1863 return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
1864 target, 0, OPTAB_WIDEN);
1865 }
1866 }
1867 }
1868
1869 #if HOST_BITS_PER_WIDE_INT == 64
1870 /* Finally, see if we can load a value into the target that is the same as the
1871 constant except that all bytes that are 0 are changed to be 0xff. If we
1872 can, then we can do a ZAPNOT to obtain the desired constant. */
1873
1874 new_const = c;
1875 for (i = 0; i < 64; i += 8)
1876 if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
1877 new_const |= (HOST_WIDE_INT) 0xff << i;
1878
1879 /* We are only called for SImode and DImode. If this is SImode, ensure that
1880 we are sign extended to a full word. */
1881
1882 if (mode == SImode)
1883 new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
1884
1885 if (new_const != c)
1886 {
1887 temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
1888 if (temp)
1889 {
1890 if (no_output)
1891 return temp;
1892 return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
1893 target, 0, OPTAB_WIDEN);
1894 }
1895 }
1896 #endif
1897
1898 return 0;
1899 }
1900
1901 /* Try to output insns to set TARGET equal to the constant C if it can be
1902 done in N or fewer insns. Do all computations in MODE. Returns the place
1903 where the output has been placed if it can be done and the insns have been
1904 emitted. If it would take more than N insns, zero is returned and no
1905 insns are emitted. */
1906
1907 static rtx
1908 alpha_emit_set_const (rtx target, enum machine_mode mode,
1909 HOST_WIDE_INT c, int n, bool no_output)
1910 {
1911 enum machine_mode orig_mode = mode;
1912 rtx orig_target = target;
1913 rtx result = 0;
1914 int i;
1915
1916 /* If we can't make any pseudos, TARGET is an SImode hard register, and we
1917 can't load this constant in one insn, then do this in DImode. */
1918 if (!can_create_pseudo_p () && mode == SImode
1919 && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
1920 {
1921 result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
1922 if (result)
1923 return result;
1924
1925 target = no_output ? NULL : gen_lowpart (DImode, target);
1926 mode = DImode;
1927 }
1928 else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
1929 {
1930 target = no_output ? NULL : gen_lowpart (DImode, target);
1931 mode = DImode;
1932 }
1933
1934 /* Try 1 insn, then 2, then up to N. */
1935 for (i = 1; i <= n; i++)
1936 {
1937 result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
1938 if (result)
1939 {
1940 rtx insn, set;
1941
1942 if (no_output)
1943 return result;
1944
1945 insn = get_last_insn ();
1946 set = single_set (insn);
1947 if (! CONSTANT_P (SET_SRC (set)))
1948 set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
1949 break;
1950 }
1951 }
1952
1953 /* Allow for the case where we changed the mode of TARGET. */
1954 if (result)
1955 {
1956 if (result == target)
1957 result = orig_target;
1958 else if (mode != orig_mode)
1959 result = gen_lowpart (orig_mode, result);
1960 }
1961
1962 return result;
1963 }
1964
1965 /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1966 fall back to a straightforward decomposition. We do this to avoid
1967 exponential run times encountered when looking for longer sequences
1968 with alpha_emit_set_const. */
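/* A worked example (illustrative numbers only): for
   c = 0x123456789abcdef0 the decomposition below yields
   d1 = -0x2110, d2 = -0x65430000, d3 = 0x5679 and d4 = 0x12340000,
   so the value is rebuilt as (((d4 + d3) << 32) + d2) + d1, i.e.
   roughly "ldah; lda; sll ..,32; ldah; lda" -- five insns, no matter
   how awkward the bit pattern is.  */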
1969
1970 static rtx
1971 alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
1972 {
1973 HOST_WIDE_INT d1, d2, d3, d4;
1974
1975 /* Decompose the entire word */
1976 #if HOST_BITS_PER_WIDE_INT >= 64
1977 gcc_assert (c2 == -(c1 < 0));
1978 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1979 c1 -= d1;
1980 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1981 c1 = (c1 - d2) >> 32;
1982 d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1983 c1 -= d3;
1984 d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1985 gcc_assert (c1 == d4);
1986 #else
1987 d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
1988 c1 -= d1;
1989 d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1990 gcc_assert (c1 == d2);
1991 c2 += (d2 < 0);
1992 d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
1993 c2 -= d3;
1994 d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
1995 gcc_assert (c2 == d4);
1996 #endif
1997
1998 /* Construct the high word */
1999 if (d4)
2000 {
2001 emit_move_insn (target, GEN_INT (d4));
2002 if (d3)
2003 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
2004 }
2005 else
2006 emit_move_insn (target, GEN_INT (d3));
2007
2008 /* Shift it into place */
2009 emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
2010
2011 /* Add in the low bits. */
2012 if (d2)
2013 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
2014 if (d1)
2015 emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
2016
2017 return target;
2018 }
2019
2020 /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
2021 the low 64 bits of the value in *P0 and the high bits (or sign extension) in *P1. */
2022
2023 static void
2024 alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
2025 {
2026 HOST_WIDE_INT i0, i1;
2027
2028 if (GET_CODE (x) == CONST_VECTOR)
2029 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
2030
2031
2032 if (CONST_INT_P (x))
2033 {
2034 i0 = INTVAL (x);
2035 i1 = -(i0 < 0);
2036 }
2037 else if (HOST_BITS_PER_WIDE_INT >= 64)
2038 {
2039 i0 = CONST_DOUBLE_LOW (x);
2040 i1 = -(i0 < 0);
2041 }
2042 else
2043 {
2044 i0 = CONST_DOUBLE_LOW (x);
2045 i1 = CONST_DOUBLE_HIGH (x);
2046 }
2047
2048 *p0 = i0;
2049 *p1 = i1;
2050 }
2051
2052 /* Implement LEGITIMATE_CONSTANT_P. This is all constants for which we
2053 are willing to load the value into a register via a move pattern.
2054 Normally this is all symbolic constants, integral constants that
2055 take three or fewer instructions, and floating-point zero. */
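/* For example (illustrative values): 0x12345678 is legitimate, since
   "ldah t,0x1234" followed by "lda t,0x5678(t)" builds it in two insns,
   whereas a value needing four or more insns is rejected here (unless
   TARGET_BUILD_CONSTANTS) and typically ends up as a constant-pool load.  */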
2056
2057 bool
2058 alpha_legitimate_constant_p (rtx x)
2059 {
2060 enum machine_mode mode = GET_MODE (x);
2061 HOST_WIDE_INT i0, i1;
2062
2063 switch (GET_CODE (x))
2064 {
2065 case CONST:
2066 case LABEL_REF:
2067 case HIGH:
2068 return true;
2069
2070 case SYMBOL_REF:
2071 /* TLS symbols are never valid. */
2072 return SYMBOL_REF_TLS_MODEL (x) == 0;
2073
2074 case CONST_DOUBLE:
2075 if (x == CONST0_RTX (mode))
2076 return true;
2077 if (FLOAT_MODE_P (mode))
2078 return false;
2079 goto do_integer;
2080
2081 case CONST_VECTOR:
2082 if (x == CONST0_RTX (mode))
2083 return true;
2084 if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
2085 return false;
2086 if (GET_MODE_SIZE (mode) != 8)
2087 return false;
2088 goto do_integer;
2089
2090 case CONST_INT:
2091 do_integer:
2092 if (TARGET_BUILD_CONSTANTS)
2093 return true;
2094 alpha_extract_integer (x, &i0, &i1);
2095 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2096 return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
2097 return false;
2098
2099 default:
2100 return false;
2101 }
2102 }
2103
2104 /* Operand 1 is known to be a constant, and should require more than one
2105 instruction to load. Emit that multi-part load. */
2106
2107 bool
2108 alpha_split_const_mov (enum machine_mode mode, rtx *operands)
2109 {
2110 HOST_WIDE_INT i0, i1;
2111 rtx temp = NULL_RTX;
2112
2113 alpha_extract_integer (operands[1], &i0, &i1);
2114
2115 if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
2116 temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
2117
2118 if (!temp && TARGET_BUILD_CONSTANTS)
2119 temp = alpha_emit_set_long_const (operands[0], i0, i1);
2120
2121 if (temp)
2122 {
2123 if (!rtx_equal_p (operands[0], temp))
2124 emit_move_insn (operands[0], temp);
2125 return true;
2126 }
2127
2128 return false;
2129 }
2130
2131 /* Expand a move instruction; return true if all work is done.
2132 We don't handle non-bwx subword loads here. */
2133
2134 bool
2135 alpha_expand_mov (enum machine_mode mode, rtx *operands)
2136 {
2137 rtx tmp;
2138
2139 /* If the output is not a register, the input must be. */
2140 if (MEM_P (operands[0])
2141 && ! reg_or_0_operand (operands[1], mode))
2142 operands[1] = force_reg (mode, operands[1]);
2143
2144 /* Allow legitimize_address to perform some simplifications. */
2145 if (mode == Pmode && symbolic_operand (operands[1], mode))
2146 {
2147 tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
2148 if (tmp)
2149 {
2150 if (tmp == operands[0])
2151 return true;
2152 operands[1] = tmp;
2153 return false;
2154 }
2155 }
2156
2157 /* Early out for non-constants and valid constants. */
2158 if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
2159 return false;
2160
2161 /* Split large integers. */
2162 if (CONST_INT_P (operands[1])
2163 || GET_CODE (operands[1]) == CONST_DOUBLE
2164 || GET_CODE (operands[1]) == CONST_VECTOR)
2165 {
2166 if (alpha_split_const_mov (mode, operands))
2167 return true;
2168 }
2169
2170 /* Otherwise we've nothing left but to drop the thing to memory. */
2171 tmp = force_const_mem (mode, operands[1]);
2172
2173 if (tmp == NULL_RTX)
2174 return false;
2175
2176 if (reload_in_progress)
2177 {
2178 emit_move_insn (operands[0], XEXP (tmp, 0));
2179 operands[1] = replace_equiv_address (tmp, operands[0]);
2180 }
2181 else
2182 operands[1] = validize_mem (tmp);
2183 return false;
2184 }
2185
2186 /* Expand a non-bwx QImode or HImode move instruction;
2187 return true if all work is done. */
2188
2189 bool
2190 alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
2191 {
2192 rtx seq;
2193
2194 /* If the output is not a register, the input must be. */
2195 if (MEM_P (operands[0]))
2196 operands[1] = force_reg (mode, operands[1]);
2197
2198 /* Handle four memory cases, unaligned and aligned for either the input
2199 or the output. The only case where we can be called during reload is
2200 for aligned loads; all other cases require temporaries. */
2201
2202 if (any_memory_operand (operands[1], mode))
2203 {
2204 if (aligned_memory_operand (operands[1], mode))
2205 {
2206 if (reload_in_progress)
2207 {
2208 if (mode == QImode)
2209 seq = gen_reload_inqi_aligned (operands[0], operands[1]);
2210 else
2211 seq = gen_reload_inhi_aligned (operands[0], operands[1]);
2212 emit_insn (seq);
2213 }
2214 else
2215 {
2216 rtx aligned_mem, bitnum;
2217 rtx scratch = gen_reg_rtx (SImode);
2218 rtx subtarget;
2219 bool copyout;
2220
2221 get_aligned_mem (operands[1], &aligned_mem, &bitnum);
2222
2223 subtarget = operands[0];
2224 if (REG_P (subtarget))
2225 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2226 else
2227 subtarget = gen_reg_rtx (DImode), copyout = true;
2228
2229 if (mode == QImode)
2230 seq = gen_aligned_loadqi (subtarget, aligned_mem,
2231 bitnum, scratch);
2232 else
2233 seq = gen_aligned_loadhi (subtarget, aligned_mem,
2234 bitnum, scratch);
2235 emit_insn (seq);
2236
2237 if (copyout)
2238 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2239 }
2240 }
2241 else
2242 {
2243 /* Don't pass these as parameters since that makes the generated
2244 code depend on parameter evaluation order which will cause
2245 bootstrap failures. */
2246
2247 rtx temp1, temp2, subtarget, ua;
2248 bool copyout;
2249
2250 temp1 = gen_reg_rtx (DImode);
2251 temp2 = gen_reg_rtx (DImode);
2252
2253 subtarget = operands[0];
2254 if (REG_P (subtarget))
2255 subtarget = gen_lowpart (DImode, subtarget), copyout = false;
2256 else
2257 subtarget = gen_reg_rtx (DImode), copyout = true;
2258
2259 ua = get_unaligned_address (operands[1]);
2260 if (mode == QImode)
2261 seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
2262 else
2263 seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
2264
2265 alpha_set_memflags (seq, operands[1]);
2266 emit_insn (seq);
2267
2268 if (copyout)
2269 emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
2270 }
2271 return true;
2272 }
2273
2274 if (any_memory_operand (operands[0], mode))
2275 {
2276 if (aligned_memory_operand (operands[0], mode))
2277 {
2278 rtx aligned_mem, bitnum;
2279 rtx temp1 = gen_reg_rtx (SImode);
2280 rtx temp2 = gen_reg_rtx (SImode);
2281
2282 get_aligned_mem (operands[0], &aligned_mem, &bitnum);
2283
2284 emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
2285 temp1, temp2));
2286 }
2287 else
2288 {
2289 rtx temp1 = gen_reg_rtx (DImode);
2290 rtx temp2 = gen_reg_rtx (DImode);
2291 rtx temp3 = gen_reg_rtx (DImode);
2292 rtx ua = get_unaligned_address (operands[0]);
2293
2294 if (mode == QImode)
2295 seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
2296 else
2297 seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
2298
2299 alpha_set_memflags (seq, operands[0]);
2300 emit_insn (seq);
2301 }
2302 return true;
2303 }
2304
2305 return false;
2306 }
2307
2308 /* Implement the movmisalign patterns. One of the operands is a memory
2309 that is not naturally aligned. Emit instructions to load it. */
2310
2311 void
2312 alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
2313 {
2314 /* Honor misaligned loads; these are the ones we promised to handle. */
2315 if (MEM_P (operands[1]))
2316 {
2317 rtx tmp;
2318
2319 if (register_operand (operands[0], mode))
2320 tmp = operands[0];
2321 else
2322 tmp = gen_reg_rtx (mode);
2323
2324 alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
2325 if (tmp != operands[0])
2326 emit_move_insn (operands[0], tmp);
2327 }
2328 else if (MEM_P (operands[0]))
2329 {
2330 if (!reg_or_0_operand (operands[1], mode))
2331 operands[1] = force_reg (mode, operands[1]);
2332 alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
2333 }
2334 else
2335 gcc_unreachable ();
2336 }
2337
2338 /* Generate an unsigned DImode to FP conversion. This is the same code
2339 optabs would emit if we didn't have TFmode patterns.
2340
2341 For SFmode, this is the only construction I've found that can pass
2342 gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
2343 intermediates will work, because you'll get intermediate rounding
2344 that ruins the end result. Some of this could be fixed by turning
2345 on round-to-positive-infinity, but that requires diddling the fpsr,
2346 which kills performance. I tried turning this around and converting
2347 to a negative number, so that I could turn on /m, but either I did
2348 it wrong or there's something else, because I wound up with the exact
2349 same single-bit error. There is a branch-less form of this same code:
2350
2351 srl $16,1,$1
2352 and $16,1,$2
2353 cmplt $16,0,$3
2354 or $1,$2,$2
2355 cmovge $16,$16,$2
2356 itoft $3,$f10
2357 itoft $2,$f11
2358 cvtqs $f11,$f11
2359 adds $f11,$f11,$f0
2360 fcmoveq $f10,$f11,$f0
2361
2362 I'm not using it because it's the same number of instructions as
2363 this branch-full form, and it has more serialized long latency
2364 instructions on the critical path.
2365
2366 For DFmode, we can avoid rounding errors by breaking up the word
2367 into two pieces, converting them separately, and adding them back:
2368
2369 LC0: .long 0,0x5f800000
2370
2371 itoft $16,$f11
2372 lda $2,LC0
2373 cmplt $16,0,$1
2374 cpyse $f11,$f31,$f10
2375 cpyse $f31,$f11,$f11
2376 s4addq $1,$2,$1
2377 lds $f12,0($1)
2378 cvtqt $f10,$f10
2379 cvtqt $f11,$f11
2380 addt $f12,$f10,$f0
2381 addt $f0,$f11,$f0
2382
2383 This doesn't seem to be a clear-cut win over the optabs form.
2384 It probably all depends on the distribution of numbers being
2385 converted -- in the optabs form, all but high-bit-set has a
2386 much lower minimum execution time. */
2387
2388 void
2389 alpha_emit_floatuns (rtx operands[2])
2390 {
2391 rtx neglab, donelab, i0, i1, f0, in, out;
2392 enum machine_mode mode;
2393
2394 out = operands[0];
2395 in = force_reg (DImode, operands[1]);
2396 mode = GET_MODE (out);
2397 neglab = gen_label_rtx ();
2398 donelab = gen_label_rtx ();
2399 i0 = gen_reg_rtx (DImode);
2400 i1 = gen_reg_rtx (DImode);
2401 f0 = gen_reg_rtx (mode);
2402
2403 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
2404
2405 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
2406 emit_jump_insn (gen_jump (donelab));
2407 emit_barrier ();
2408
2409 emit_label (neglab);
2410
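  /* The input has its high bit set: halve it, folding the discarded low
     bit back in so that the final doubling cannot round twice (the usual
     shift-and-or "round to odd" trick), convert the halved value, then
     double the result.  */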
2411 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
2412 emit_insn (gen_anddi3 (i1, in, const1_rtx));
2413 emit_insn (gen_iordi3 (i0, i0, i1));
2414 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
2415 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
2416
2417 emit_label (donelab);
2418 }
2419
2420 /* Generate the comparison for a conditional branch. */
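/* For instance, an integer "a >= b" branch has no direct compare
   instruction: GE is reversed to LT below, so we emit "cmplt a,b,t" and
   branch with "beq t,label" (a sketch of the common case; FP compares
   swap the operands instead).  */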
2421
2422 void
2423 alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
2424 {
2425 enum rtx_code cmp_code, branch_code;
2426 enum machine_mode branch_mode = VOIDmode;
2427 enum rtx_code code = GET_CODE (operands[0]);
2428 rtx op0 = operands[1], op1 = operands[2];
2429 rtx tem;
2430
2431 if (cmp_mode == TFmode)
2432 {
2433 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2434 op1 = const0_rtx;
2435 cmp_mode = DImode;
2436 }
2437
2438 /* The general case: fold the comparison code to the types of compares
2439 that we have, choosing the branch as necessary. */
2440 switch (code)
2441 {
2442 case EQ: case LE: case LT: case LEU: case LTU:
2443 case UNORDERED:
2444 /* We have these compares: */
2445 cmp_code = code, branch_code = NE;
2446 break;
2447
2448 case NE:
2449 case ORDERED:
2450 /* These must be reversed. */
2451 cmp_code = reverse_condition (code), branch_code = EQ;
2452 break;
2453
2454 case GE: case GT: case GEU: case GTU:
2455 /* For FP, we swap them, for INT, we reverse them. */
2456 if (cmp_mode == DFmode)
2457 {
2458 cmp_code = swap_condition (code);
2459 branch_code = NE;
2460 tem = op0, op0 = op1, op1 = tem;
2461 }
2462 else
2463 {
2464 cmp_code = reverse_condition (code);
2465 branch_code = EQ;
2466 }
2467 break;
2468
2469 default:
2470 gcc_unreachable ();
2471 }
2472
2473 if (cmp_mode == DFmode)
2474 {
2475 if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
2476 {
2477 /* When we are not as concerned about non-finite values, and we
2478 are comparing against zero, we can branch directly. */
2479 if (op1 == CONST0_RTX (DFmode))
2480 cmp_code = UNKNOWN, branch_code = code;
2481 else if (op0 == CONST0_RTX (DFmode))
2482 {
2483 /* Undo the swap we probably did just above. */
2484 tem = op0, op0 = op1, op1 = tem;
2485 branch_code = swap_condition (cmp_code);
2486 cmp_code = UNKNOWN;
2487 }
2488 }
2489 else
2490 {
2491 /* ??? We mark the branch mode to be CCmode to prevent the
2492 compare and branch from being combined, since the compare
2493 insn follows IEEE rules that the branch does not. */
2494 branch_mode = CCmode;
2495 }
2496 }
2497 else
2498 {
2499 /* The following optimizations are only for signed compares. */
2500 if (code != LEU && code != LTU && code != GEU && code != GTU)
2501 {
2502 /* Whee. Compare and branch against 0 directly. */
2503 if (op1 == const0_rtx)
2504 cmp_code = UNKNOWN, branch_code = code;
2505
2506 /* If the constant doesn't fit into an immediate, but can
2507 be generated by lda/ldah, we adjust the argument and
2508 compare against zero, so we can use beq/bne directly. */
2509 /* ??? Don't do this when comparing against symbols, otherwise
2510 we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
2511 be declared false out of hand (at least for non-weak). */
2512 else if (CONST_INT_P (op1)
2513 && (code == EQ || code == NE)
2514 && !(symbolic_operand (op0, VOIDmode)
2515 || (REG_P (op0) && REG_POINTER (op0))))
2516 {
2517 rtx n_op1 = GEN_INT (-INTVAL (op1));
2518
2519 if (! satisfies_constraint_I (op1)
2520 && (satisfies_constraint_K (n_op1)
2521 || satisfies_constraint_L (n_op1)))
2522 cmp_code = PLUS, branch_code = code, op1 = n_op1;
2523 }
2524 }
2525
2526 if (!reg_or_0_operand (op0, DImode))
2527 op0 = force_reg (DImode, op0);
2528 if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
2529 op1 = force_reg (DImode, op1);
2530 }
2531
2532 /* Emit an initial compare instruction, if necessary. */
2533 tem = op0;
2534 if (cmp_code != UNKNOWN)
2535 {
2536 tem = gen_reg_rtx (cmp_mode);
2537 emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
2538 }
2539
2540 /* Emit the branch instruction. */
2541 tem = gen_rtx_SET (VOIDmode, pc_rtx,
2542 gen_rtx_IF_THEN_ELSE (VOIDmode,
2543 gen_rtx_fmt_ee (branch_code,
2544 branch_mode, tem,
2545 CONST0_RTX (cmp_mode)),
2546 gen_rtx_LABEL_REF (VOIDmode,
2547 operands[3]),
2548 pc_rtx));
2549 emit_jump_insn (tem);
2550 }
2551
2552 /* Certain simplifications can be done to make invalid setcc operations
2553 valid. Return true if the comparison was emitted, or false if we can't. */
2554
2555 bool
2556 alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
2557 {
2558 enum rtx_code cmp_code;
2559 enum rtx_code code = GET_CODE (operands[1]);
2560 rtx op0 = operands[2], op1 = operands[3];
2561 rtx tmp;
2562
2563 if (cmp_mode == TFmode)
2564 {
2565 op0 = alpha_emit_xfloating_compare (&code, op0, op1);
2566 op1 = const0_rtx;
2567 cmp_mode = DImode;
2568 }
2569
2570 if (cmp_mode == DFmode && !TARGET_FIX)
2571 return 0;
2572
2573 /* The general case: fold the comparison code to the types of compares
2574 that we have, choosing the branch as necessary. */
2575
2576 cmp_code = UNKNOWN;
2577 switch (code)
2578 {
2579 case EQ: case LE: case LT: case LEU: case LTU:
2580 case UNORDERED:
2581 /* We have these compares. */
2582 if (cmp_mode == DFmode)
2583 cmp_code = code, code = NE;
2584 break;
2585
2586 case NE:
2587 if (cmp_mode == DImode && op1 == const0_rtx)
2588 break;
2589 /* FALLTHRU */
2590
2591 case ORDERED:
2592 cmp_code = reverse_condition (code);
2593 code = EQ;
2594 break;
2595
2596 case GE: case GT: case GEU: case GTU:
2597 /* These normally need swapping, but for integer zero we have
2598 special patterns that recognize swapped operands. */
2599 if (cmp_mode == DImode && op1 == const0_rtx)
2600 break;
2601 code = swap_condition (code);
2602 if (cmp_mode == DFmode)
2603 cmp_code = code, code = NE;
2604 tmp = op0, op0 = op1, op1 = tmp;
2605 break;
2606
2607 default:
2608 gcc_unreachable ();
2609 }
2610
2611 if (cmp_mode == DImode)
2612 {
2613 if (!register_operand (op0, DImode))
2614 op0 = force_reg (DImode, op0);
2615 if (!reg_or_8bit_operand (op1, DImode))
2616 op1 = force_reg (DImode, op1);
2617 }
2618
2619 /* Emit an initial compare instruction, if necessary. */
2620 if (cmp_code != UNKNOWN)
2621 {
2622 tmp = gen_reg_rtx (cmp_mode);
2623 emit_insn (gen_rtx_SET (VOIDmode, tmp,
2624 gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
2625
2626 op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
2627 op1 = const0_rtx;
2628 }
2629
2630 /* Emit the setcc instruction. */
2631 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2632 gen_rtx_fmt_ee (code, DImode, op0, op1)));
2633 return true;
2634 }
2635
2636
2637 /* Rewrite a comparison against zero CMP of the form
2638 (CODE (cc0) (const_int 0)) so it can be written validly in
2639 a conditional move (if_then_else CMP ...).
2640 If both of the operands that set cc0 are nonzero we must emit
2641 an insn to perform the compare (it can't be done within
2642 the conditional move). */
2643
2644 rtx
2645 alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
2646 {
2647 enum rtx_code code = GET_CODE (cmp);
2648 enum rtx_code cmov_code = NE;
2649 rtx op0 = XEXP (cmp, 0);
2650 rtx op1 = XEXP (cmp, 1);
2651 enum machine_mode cmp_mode
2652 = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
2653 enum machine_mode cmov_mode = VOIDmode;
2654 int local_fast_math = flag_unsafe_math_optimizations;
2655 rtx tem;
2656
2657 gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
2658
2659 if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
2660 {
2661 enum rtx_code cmp_code;
2662
2663 if (! TARGET_FIX)
2664 return 0;
2665
2666 /* If we have fp<->int register move instructions, do a cmov by
2667 performing the comparison in fp registers, and move the
2668 zero/nonzero value to integer registers, where we can then
2669 use a normal cmov, or vice-versa. */
2670
2671 switch (code)
2672 {
2673 case EQ: case LE: case LT: case LEU: case LTU:
2674 /* We have these compares. */
2675 cmp_code = code, code = NE;
2676 break;
2677
2678 case NE:
2679 /* This must be reversed. */
2680 cmp_code = EQ, code = EQ;
2681 break;
2682
2683 case GE: case GT: case GEU: case GTU:
2684 /* These normally need swapping, but for integer zero we have
2685 special patterns that recognize swapped operands. */
2686 if (cmp_mode == DImode && op1 == const0_rtx)
2687 cmp_code = code, code = NE;
2688 else
2689 {
2690 cmp_code = swap_condition (code);
2691 code = NE;
2692 tem = op0, op0 = op1, op1 = tem;
2693 }
2694 break;
2695
2696 default:
2697 gcc_unreachable ();
2698 }
2699
2700 tem = gen_reg_rtx (cmp_mode);
2701 emit_insn (gen_rtx_SET (VOIDmode, tem,
2702 gen_rtx_fmt_ee (cmp_code, cmp_mode,
2703 op0, op1)));
2704
2705 cmp_mode = cmp_mode == DImode ? DFmode : DImode;
2706 op0 = gen_lowpart (cmp_mode, tem);
2707 op1 = CONST0_RTX (cmp_mode);
2708 local_fast_math = 1;
2709 }
2710
2711 /* We may be able to use a conditional move directly.
2712 This avoids emitting spurious compares. */
2713 if (signed_comparison_operator (cmp, VOIDmode)
2714 && (cmp_mode == DImode || local_fast_math)
2715 && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
2716 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
2717
2718 /* We can't put the comparison inside the conditional move;
2719 emit a compare instruction and put that inside the
2720 conditional move. Make sure we emit only comparisons we have;
2721 swap or reverse as necessary. */
2722
2723 if (!can_create_pseudo_p ())
2724 return NULL_RTX;
2725
2726 switch (code)
2727 {
2728 case EQ: case LE: case LT: case LEU: case LTU:
2729 /* We have these compares: */
2730 break;
2731
2732 case NE:
2733 /* This must be reversed. */
2734 code = reverse_condition (code);
2735 cmov_code = EQ;
2736 break;
2737
2738 case GE: case GT: case GEU: case GTU:
2739 /* These must be swapped. */
2740 if (op1 != CONST0_RTX (cmp_mode))
2741 {
2742 code = swap_condition (code);
2743 tem = op0, op0 = op1, op1 = tem;
2744 }
2745 break;
2746
2747 default:
2748 gcc_unreachable ();
2749 }
2750
2751 if (cmp_mode == DImode)
2752 {
2753 if (!reg_or_0_operand (op0, DImode))
2754 op0 = force_reg (DImode, op0);
2755 if (!reg_or_8bit_operand (op1, DImode))
2756 op1 = force_reg (DImode, op1);
2757 }
2758
2759 /* ??? We mark the branch mode to be CCmode to prevent the compare
2760 and cmov from being combined, since the compare insn follows IEEE
2761 rules that the cmov does not. */
2762 if (cmp_mode == DFmode && !local_fast_math)
2763 cmov_mode = CCmode;
2764
2765 tem = gen_reg_rtx (cmp_mode);
2766 emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
2767 return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
2768 }
2769
2770 /* Simplify a conditional move of two constants into a setcc with
2771 arithmetic. This is done with a splitter since combine would
2772 just undo the work if done during code generation. It also catches
2773 cases we wouldn't have before cse. */
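/* A sketch of the kind of transformation made here (example values):
   "dest = (x == 0) ? 8 : 0" becomes "cmpeq x,0,t" followed by
   "sll t,3,t", and for a difference of 4 or 8 with a nonzero false
   value the scaled-add forms (s4addq/s8addq) are used instead.  */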
2774
2775 int
2776 alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
2777 rtx t_rtx, rtx f_rtx)
2778 {
2779 HOST_WIDE_INT t, f, diff;
2780 enum machine_mode mode;
2781 rtx target, subtarget, tmp;
2782
2783 mode = GET_MODE (dest);
2784 t = INTVAL (t_rtx);
2785 f = INTVAL (f_rtx);
2786 diff = t - f;
2787
2788 if (((code == NE || code == EQ) && diff < 0)
2789 || (code == GE || code == GT))
2790 {
2791 code = reverse_condition (code);
2792 diff = t, t = f, f = diff;
2793 diff = t - f;
2794 }
2795
2796 subtarget = target = dest;
2797 if (mode != DImode)
2798 {
2799 target = gen_lowpart (DImode, dest);
2800 if (can_create_pseudo_p ())
2801 subtarget = gen_reg_rtx (DImode);
2802 else
2803 subtarget = target;
2804 }
2805 /* Below, we must be careful to use copy_rtx on target and subtarget
2806 in intermediate insns, as they may be a subreg rtx, which may not
2807 be shared. */
2808
2809 if (f == 0 && exact_log2 (diff) > 0
2810 /* On EV6, we've got enough shifters to make non-arithmetic shifts
2811 viable over a longer latency cmove. On EV5, the E0 slot is a
2812 scarce resource, and on EV4 shift has the same latency as a cmove. */
2813 && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
2814 {
2815 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2816 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2817
2818 tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
2819 GEN_INT (exact_log2 (t)));
2820 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2821 }
2822 else if (f == 0 && t == -1)
2823 {
2824 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2825 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2826
2827 emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
2828 }
2829 else if (diff == 1 || diff == 4 || diff == 8)
2830 {
2831 rtx add_op;
2832
2833 tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
2834 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
2835
2836 if (diff == 1)
2837 emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
2838 else
2839 {
2840 add_op = GEN_INT (f);
2841 if (sext_add_operand (add_op, mode))
2842 {
2843 tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
2844 GEN_INT (diff));
2845 tmp = gen_rtx_PLUS (DImode, tmp, add_op);
2846 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
2847 }
2848 else
2849 return 0;
2850 }
2851 }
2852 else
2853 return 0;
2854
2855 return 1;
2856 }
2857 \f
2858 /* Look up the X_floating library function name for the
2859 given operation. */
2860
2861 struct GTY(()) xfloating_op
2862 {
2863 const enum rtx_code code;
2864 const char *const GTY((skip)) osf_func;
2865 const char *const GTY((skip)) vms_func;
2866 rtx libcall;
2867 };
2868
2869 static GTY(()) struct xfloating_op xfloating_ops[] =
2870 {
2871 { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
2872 { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
2873 { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
2874 { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
2875 { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
2876 { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
2877 { LT, "_OtsLssX", "OTS$LSS_X", 0 },
2878 { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
2879 { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
2880 { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
2881 { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
2882 { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
2883 { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
2884 { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
2885 { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
2886 };
2887
2888 static GTY(()) struct xfloating_op vax_cvt_ops[] =
2889 {
2890 { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
2891 { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
2892 };
2893
2894 static rtx
2895 alpha_lookup_xfloating_lib_func (enum rtx_code code)
2896 {
2897 struct xfloating_op *ops = xfloating_ops;
2898 long n = ARRAY_SIZE (xfloating_ops);
2899 long i;
2900
2901 gcc_assert (TARGET_HAS_XFLOATING_LIBS);
2902
2903 /* How irritating. Nothing to key off for the main table. */
2904 if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
2905 {
2906 ops = vax_cvt_ops;
2907 n = ARRAY_SIZE (vax_cvt_ops);
2908 }
2909
2910 for (i = 0; i < n; ++i, ++ops)
2911 if (ops->code == code)
2912 {
2913 rtx func = ops->libcall;
2914 if (!func)
2915 {
2916 func = init_one_libfunc (TARGET_ABI_OPEN_VMS
2917 ? ops->vms_func : ops->osf_func);
2918 ops->libcall = func;
2919 }
2920 return func;
2921 }
2922
2923 gcc_unreachable ();
2924 }
2925
2926 /* Most X_floating operations take the rounding mode as an argument.
2927 Compute that here. */
2928
2929 static int
2930 alpha_compute_xfloating_mode_arg (enum rtx_code code,
2931 enum alpha_fp_rounding_mode round)
2932 {
2933 int mode;
2934
2935 switch (round)
2936 {
2937 case ALPHA_FPRM_NORM:
2938 mode = 2;
2939 break;
2940 case ALPHA_FPRM_MINF:
2941 mode = 1;
2942 break;
2943 case ALPHA_FPRM_CHOP:
2944 mode = 0;
2945 break;
2946 case ALPHA_FPRM_DYN:
2947 mode = 4;
2948 break;
2949 default:
2950 gcc_unreachable ();
2951
2952 /* XXX For reference, round to +inf is mode = 3. */
2953 }
2954
2955 if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
2956 mode |= 0x10000;
2957
2958 return mode;
2959 }
2960
2961 /* Emit an X_floating library function call.
2962
2963 Note that these functions do not follow normal calling conventions:
2964 TFmode arguments are passed in two integer registers (as opposed to
2965 indirect); TFmode return values appear in R16+R17.
2966
2967 FUNC is the function to call.
2968 TARGET is where the output belongs.
2969 OPERANDS are the inputs.
2970 NOPERANDS is the count of inputs.
2971 EQUIV is the expression equivalent for the function.
2972 */
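/* For example, a TFmode addition ends up as a call to _OtsAddX (or
   OTS$ADD_X on VMS) with the two addends in $16-$17 and $18-$19, the
   rounding-mode selector in $20, and the TFmode result coming back in
   $16-$17, as set up by the loop below.  */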
2973
2974 static void
2975 alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
2976 int noperands, rtx equiv)
2977 {
2978 rtx usage = NULL_RTX, tmp, reg;
2979 int regno = 16, i;
2980
2981 start_sequence ();
2982
2983 for (i = 0; i < noperands; ++i)
2984 {
2985 switch (GET_MODE (operands[i]))
2986 {
2987 case TFmode:
2988 reg = gen_rtx_REG (TFmode, regno);
2989 regno += 2;
2990 break;
2991
2992 case DFmode:
2993 reg = gen_rtx_REG (DFmode, regno + 32);
2994 regno += 1;
2995 break;
2996
2997 case VOIDmode:
2998 gcc_assert (CONST_INT_P (operands[i]));
2999 /* FALLTHRU */
3000 case DImode:
3001 reg = gen_rtx_REG (DImode, regno);
3002 regno += 1;
3003 break;
3004
3005 default:
3006 gcc_unreachable ();
3007 }
3008
3009 emit_move_insn (reg, operands[i]);
3010 usage = alloc_EXPR_LIST (0, gen_rtx_USE (VOIDmode, reg), usage);
3011 }
3012
3013 switch (GET_MODE (target))
3014 {
3015 case TFmode:
3016 reg = gen_rtx_REG (TFmode, 16);
3017 break;
3018 case DFmode:
3019 reg = gen_rtx_REG (DFmode, 32);
3020 break;
3021 case DImode:
3022 reg = gen_rtx_REG (DImode, 0);
3023 break;
3024 default:
3025 gcc_unreachable ();
3026 }
3027
3028 tmp = gen_rtx_MEM (QImode, func);
3029 tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
3030 const0_rtx, const0_rtx));
3031 CALL_INSN_FUNCTION_USAGE (tmp) = usage;
3032 RTL_CONST_CALL_P (tmp) = 1;
3033
3034 tmp = get_insns ();
3035 end_sequence ();
3036
3037 emit_libcall_block (tmp, target, reg, equiv);
3038 }
3039
3040 /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
3041
3042 void
3043 alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
3044 {
3045 rtx func;
3046 int mode;
3047 rtx out_operands[3];
3048
3049 func = alpha_lookup_xfloating_lib_func (code);
3050 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3051
3052 out_operands[0] = operands[1];
3053 out_operands[1] = operands[2];
3054 out_operands[2] = GEN_INT (mode);
3055 alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
3056 gen_rtx_fmt_ee (code, TFmode, operands[1],
3057 operands[2]));
3058 }
3059
3060 /* Emit an X_floating library function call for a comparison. */
3061
3062 static rtx
3063 alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
3064 {
3065 enum rtx_code cmp_code, res_code;
3066 rtx func, out, operands[2], note;
3067
3068 /* X_floating library comparison functions return
3069 -1 unordered
3070 0 false
3071 1 true
3072 Convert the compare against the raw return value. */
3073
3074 cmp_code = *pcode;
3075 switch (cmp_code)
3076 {
3077 case UNORDERED:
3078 cmp_code = EQ;
3079 res_code = LT;
3080 break;
3081 case ORDERED:
3082 cmp_code = EQ;
3083 res_code = GE;
3084 break;
3085 case NE:
3086 res_code = NE;
3087 break;
3088 case EQ:
3089 case LT:
3090 case GT:
3091 case LE:
3092 case GE:
3093 res_code = GT;
3094 break;
3095 default:
3096 gcc_unreachable ();
3097 }
3098 *pcode = res_code;
3099
3100 func = alpha_lookup_xfloating_lib_func (cmp_code);
3101
3102 operands[0] = op0;
3103 operands[1] = op1;
3104 out = gen_reg_rtx (DImode);
3105
3106 /* What's actually returned is -1,0,1, not a proper boolean value,
3107 so use an EXPR_LIST as with a generic libcall instead of a
3108 comparison type expression. */
3109 note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
3110 note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
3111 note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
3112 alpha_emit_xfloating_libcall (func, out, operands, 2, note);
3113
3114 return out;
3115 }
3116
3117 /* Emit an X_floating library function call for a conversion. */
3118
3119 void
3120 alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
3121 {
3122 int noperands = 1, mode;
3123 rtx out_operands[2];
3124 rtx func;
3125 enum rtx_code code = orig_code;
3126
3127 if (code == UNSIGNED_FIX)
3128 code = FIX;
3129
3130 func = alpha_lookup_xfloating_lib_func (code);
3131
3132 out_operands[0] = operands[1];
3133
3134 switch (code)
3135 {
3136 case FIX:
3137 mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
3138 out_operands[1] = GEN_INT (mode);
3139 noperands = 2;
3140 break;
3141 case FLOAT_TRUNCATE:
3142 mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
3143 out_operands[1] = GEN_INT (mode);
3144 noperands = 2;
3145 break;
3146 default:
3147 break;
3148 }
3149
3150 alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
3151 gen_rtx_fmt_e (orig_code,
3152 GET_MODE (operands[0]),
3153 operands[1]));
3154 }
3155
3156 /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
3157 DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
3158 guarantee that the sequence
3159 set (OP[0] OP[2])
3160 set (OP[1] OP[3])
3161 is valid. Naturally, output operand ordering is little-endian.
3162 This is used by *movtf_internal and *movti_internal. */
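/* For example (register numbers purely illustrative): splitting a TImode
   copy from the pair $1/$2 into the pair $2/$3 would clobber $2 before it
   is read, so when FIXUP_OVERLAP is set and OP[0] overlaps OP[3] the two
   halves are swapped and the high word is moved first.  */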
3163
3164 void
3165 alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
3166 bool fixup_overlap)
3167 {
3168 switch (GET_CODE (operands[1]))
3169 {
3170 case REG:
3171 operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
3172 operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
3173 break;
3174
3175 case MEM:
3176 operands[3] = adjust_address (operands[1], DImode, 8);
3177 operands[2] = adjust_address (operands[1], DImode, 0);
3178 break;
3179
3180 case CONST_INT:
3181 case CONST_DOUBLE:
3182 gcc_assert (operands[1] == CONST0_RTX (mode));
3183 operands[2] = operands[3] = const0_rtx;
3184 break;
3185
3186 default:
3187 gcc_unreachable ();
3188 }
3189
3190 switch (GET_CODE (operands[0]))
3191 {
3192 case REG:
3193 operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
3194 operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
3195 break;
3196
3197 case MEM:
3198 operands[1] = adjust_address (operands[0], DImode, 8);
3199 operands[0] = adjust_address (operands[0], DImode, 0);
3200 break;
3201
3202 default:
3203 gcc_unreachable ();
3204 }
3205
3206 if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
3207 {
3208 rtx tmp;
3209 tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
3210 tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
3211 }
3212 }
3213
3214 /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
3215 op2 is a register containing the sign bit, operation is the
3216 logical operation to be performed. */
3217
3218 void
3219 alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
3220 {
3221 rtx high_bit = operands[2];
3222 rtx scratch;
3223 int move;
3224
3225 alpha_split_tmode_pair (operands, TFmode, false);
3226
3227 /* Detect three flavors of operand overlap. */
3228 move = 1;
3229 if (rtx_equal_p (operands[0], operands[2]))
3230 move = 0;
3231 else if (rtx_equal_p (operands[1], operands[2]))
3232 {
3233 if (rtx_equal_p (operands[0], high_bit))
3234 move = 2;
3235 else
3236 move = -1;
3237 }
3238
3239 if (move < 0)
3240 emit_move_insn (operands[0], operands[2]);
3241
3242 /* ??? If the destination overlaps both source tf and high_bit, then
3243 assume source tf is dead in its entirety and use the other half
3244 for a scratch register. Otherwise "scratch" is just the proper
3245 destination register. */
3246 scratch = operands[move < 2 ? 1 : 3];
3247
3248 emit_insn ((*operation) (scratch, high_bit, operands[3]));
3249
3250 if (move > 0)
3251 {
3252 emit_move_insn (operands[0], operands[2]);
3253 if (move > 1)
3254 emit_move_insn (operands[1], scratch);
3255 }
3256 }
3257 \f
3258 /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
3259 unaligned data:
3260
3261 unsigned: signed:
3262 word: ldq_u r1,X(r11) ldq_u r1,X(r11)
3263 ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
3264 lda r3,X(r11) lda r3,X+2(r11)
3265 extwl r1,r3,r1 extql r1,r3,r1
3266 extwh r2,r3,r2 extqh r2,r3,r2
3267 or r1,r2,r1 or r1,r2,r1
3268 sra r1,48,r1
3269
3270 long: ldq_u r1,X(r11) ldq_u r1,X(r11)
3271 ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
3272 lda r3,X(r11) lda r3,X(r11)
3273 extll r1,r3,r1 extll r1,r3,r1
3274 extlh r2,r3,r2 extlh r2,r3,r2
3275 or r1,r2,r1 addl r1,r2,r1
3276
3277 quad: ldq_u r1,X(r11)
3278 ldq_u r2,X+7(r11)
3279 lda r3,X(r11)
3280 extql r1,r3,r1
3281 extqh r2,r3,r2
3282 or r1,r2,r1
3283 */
3284
3285 void
3286 alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
3287 HOST_WIDE_INT ofs, int sign)
3288 {
3289 rtx meml, memh, addr, extl, exth, tmp, mema;
3290 enum machine_mode mode;
3291
3292 if (TARGET_BWX && size == 2)
3293 {
3294 meml = adjust_address (mem, QImode, ofs);
3295 memh = adjust_address (mem, QImode, ofs+1);
3296 if (BYTES_BIG_ENDIAN)
3297 tmp = meml, meml = memh, memh = tmp;
3298 extl = gen_reg_rtx (DImode);
3299 exth = gen_reg_rtx (DImode);
3300 emit_insn (gen_zero_extendqidi2 (extl, meml));
3301 emit_insn (gen_zero_extendqidi2 (exth, memh));
3302 exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
3303 NULL, 1, OPTAB_LIB_WIDEN);
3304 addr = expand_simple_binop (DImode, IOR, extl, exth,
3305 NULL, 1, OPTAB_LIB_WIDEN);
3306
3307 if (sign && GET_MODE (tgt) != HImode)
3308 {
3309 addr = gen_lowpart (HImode, addr);
3310 emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
3311 }
3312 else
3313 {
3314 if (GET_MODE (tgt) != DImode)
3315 addr = gen_lowpart (GET_MODE (tgt), addr);
3316 emit_move_insn (tgt, addr);
3317 }
3318 return;
3319 }
3320
3321 meml = gen_reg_rtx (DImode);
3322 memh = gen_reg_rtx (DImode);
3323 addr = gen_reg_rtx (DImode);
3324 extl = gen_reg_rtx (DImode);
3325 exth = gen_reg_rtx (DImode);
3326
3327 mema = XEXP (mem, 0);
3328 if (GET_CODE (mema) == LO_SUM)
3329 mema = force_reg (Pmode, mema);
3330
3331 /* AND addresses cannot be in any alias set, since they may implicitly
3332 alias surrounding code. Ideally we'd have some alias set that
3333 covered all types except those with alignment 8 or higher. */
3334
3335 tmp = change_address (mem, DImode,
3336 gen_rtx_AND (DImode,
3337 plus_constant (mema, ofs),
3338 GEN_INT (-8)));
3339 set_mem_alias_set (tmp, 0);
3340 emit_move_insn (meml, tmp);
3341
3342 tmp = change_address (mem, DImode,
3343 gen_rtx_AND (DImode,
3344 plus_constant (mema, ofs + size - 1),
3345 GEN_INT (-8)));
3346 set_mem_alias_set (tmp, 0);
3347 emit_move_insn (memh, tmp);
3348
3349 if (WORDS_BIG_ENDIAN && sign && (size == 2 || size == 4))
3350 {
3351 emit_move_insn (addr, plus_constant (mema, -1));
3352
3353 emit_insn (gen_extqh_be (extl, meml, addr));
3354 emit_insn (gen_extxl_be (exth, memh, GEN_INT (64), addr));
3355
3356 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3357 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (64 - size*8),
3358 addr, 1, OPTAB_WIDEN);
3359 }
3360 else if (sign && size == 2)
3361 {
3362 emit_move_insn (addr, plus_constant (mema, ofs+2));
3363
3364 emit_insn (gen_extxl_le (extl, meml, GEN_INT (64), addr));
3365 emit_insn (gen_extqh_le (exth, memh, addr));
3366
3367 /* We must use tgt here for the target. Alpha-vms port fails if we use
3368 addr for the target, because addr is marked as a pointer and combine
3369 knows that pointers are always sign-extended 32-bit values. */
3370 addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
3371 addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
3372 addr, 1, OPTAB_WIDEN);
3373 }
3374 else
3375 {
3376 if (WORDS_BIG_ENDIAN)
3377 {
3378 emit_move_insn (addr, plus_constant (mema, ofs+size-1));
3379 switch ((int) size)
3380 {
3381 case 2:
3382 emit_insn (gen_extwh_be (extl, meml, addr));
3383 mode = HImode;
3384 break;
3385
3386 case 4:
3387 emit_insn (gen_extlh_be (extl, meml, addr));
3388 mode = SImode;
3389 break;
3390
3391 case 8:
3392 emit_insn (gen_extqh_be (extl, meml, addr));
3393 mode = DImode;
3394 break;
3395
3396 default:
3397 gcc_unreachable ();
3398 }
3399 emit_insn (gen_extxl_be (exth, memh, GEN_INT (size*8), addr));
3400 }
3401 else
3402 {
3403 emit_move_insn (addr, plus_constant (mema, ofs));
3404 emit_insn (gen_extxl_le (extl, meml, GEN_INT (size*8), addr));
3405 switch ((int) size)
3406 {
3407 case 2:
3408 emit_insn (gen_extwh_le (exth, memh, addr));
3409 mode = HImode;
3410 break;
3411
3412 case 4:
3413 emit_insn (gen_extlh_le (exth, memh, addr));
3414 mode = SImode;
3415 break;
3416
3417 case 8:
3418 emit_insn (gen_extqh_le (exth, memh, addr));
3419 mode = DImode;
3420 break;
3421
3422 default:
3423 gcc_unreachable ();
3424 }
3425 }
3426
3427 addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
3428 gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
3429 sign, OPTAB_WIDEN);
3430 }
3431
3432 if (addr != tgt)
3433 emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
3434 }
3435
3436 /* Similarly, use ins and msk instructions to perform unaligned stores. */
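/* In outline, for the little-endian case below: load the two quadwords
   that cover the destination, clear the bytes being replaced (the msk
   patterns), shift the source bytes into position (the ins patterns),
   OR the pieces together, and store the high quadword before the low
   one so the degenerate aligned case still comes out right.  */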
3437
3438 void
3439 alpha_expand_unaligned_store (rtx dst, rtx src,
3440 HOST_WIDE_INT size, HOST_WIDE_INT ofs)
3441 {
3442 rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
3443
3444 if (TARGET_BWX && size == 2)
3445 {
3446 if (src != const0_rtx)
3447 {
3448 dstl = gen_lowpart (QImode, src);
3449 dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
3450 NULL, 1, OPTAB_LIB_WIDEN);
3451 dsth = gen_lowpart (QImode, dsth);
3452 }
3453 else
3454 dstl = dsth = const0_rtx;
3455
3456 meml = adjust_address (dst, QImode, ofs);
3457 memh = adjust_address (dst, QImode, ofs+1);
3458 if (BYTES_BIG_ENDIAN)
3459 addr = meml, meml = memh, memh = addr;
3460
3461 emit_move_insn (meml, dstl);
3462 emit_move_insn (memh, dsth);
3463 return;
3464 }
3465
3466 dstl = gen_reg_rtx (DImode);
3467 dsth = gen_reg_rtx (DImode);
3468 insl = gen_reg_rtx (DImode);
3469 insh = gen_reg_rtx (DImode);
3470
3471 dsta = XEXP (dst, 0);
3472 if (GET_CODE (dsta) == LO_SUM)
3473 dsta = force_reg (Pmode, dsta);
3474
3475 /* AND addresses cannot be in any alias set, since they may implicitly
3476 alias surrounding code. Ideally we'd have some alias set that
3477 covered all types except those with alignment 8 or higher. */
3478
3479 meml = change_address (dst, DImode,
3480 gen_rtx_AND (DImode,
3481 plus_constant (dsta, ofs),
3482 GEN_INT (-8)));
3483 set_mem_alias_set (meml, 0);
3484
3485 memh = change_address (dst, DImode,
3486 gen_rtx_AND (DImode,
3487 plus_constant (dsta, ofs + size - 1),
3488 GEN_INT (-8)));
3489 set_mem_alias_set (memh, 0);
3490
3491 emit_move_insn (dsth, memh);
3492 emit_move_insn (dstl, meml);
3493 if (WORDS_BIG_ENDIAN)
3494 {
3495 addr = copy_addr_to_reg (plus_constant (dsta, ofs+size-1));
3496
3497 if (src != const0_rtx)
3498 {
3499 switch ((int) size)
3500 {
3501 case 2:
3502 emit_insn (gen_inswl_be (insh, gen_lowpart (HImode,src), addr));
3503 break;
3504 case 4:
3505 emit_insn (gen_insll_be (insh, gen_lowpart (SImode,src), addr));
3506 break;
3507 case 8:
3508 emit_insn (gen_insql_be (insh, gen_lowpart (DImode,src), addr));
3509 break;
3510 }
3511 emit_insn (gen_insxh (insl, gen_lowpart (DImode, src),
3512 GEN_INT (size*8), addr));
3513 }
3514
3515 switch ((int) size)
3516 {
3517 case 2:
3518 emit_insn (gen_mskxl_be (dsth, dsth, GEN_INT (0xffff), addr));
3519 break;
3520 case 4:
3521 {
3522 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3523 emit_insn (gen_mskxl_be (dsth, dsth, msk, addr));
3524 break;
3525 }
3526 case 8:
3527 emit_insn (gen_mskxl_be (dsth, dsth, constm1_rtx, addr));
3528 break;
3529 }
3530
3531 emit_insn (gen_mskxh (dstl, dstl, GEN_INT (size*8), addr));
3532 }
3533 else
3534 {
3535 addr = copy_addr_to_reg (plus_constant (dsta, ofs));
3536
3537 if (src != CONST0_RTX (GET_MODE (src)))
3538 {
3539 emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
3540 GEN_INT (size*8), addr));
3541
3542 switch ((int) size)
3543 {
3544 case 2:
3545 emit_insn (gen_inswl_le (insl, gen_lowpart (HImode, src), addr));
3546 break;
3547 case 4:
3548 emit_insn (gen_insll_le (insl, gen_lowpart (SImode, src), addr));
3549 break;
3550 case 8:
3551 emit_insn (gen_insql_le (insl, gen_lowpart (DImode, src), addr));
3552 break;
3553 }
3554 }
3555
3556 emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
3557
3558 switch ((int) size)
3559 {
3560 case 2:
3561 emit_insn (gen_mskxl_le (dstl, dstl, GEN_INT (0xffff), addr));
3562 break;
3563 case 4:
3564 {
3565 rtx msk = immed_double_const (0xffffffff, 0, DImode);
3566 emit_insn (gen_mskxl_le (dstl, dstl, msk, addr));
3567 break;
3568 }
3569 case 8:
3570 emit_insn (gen_mskxl_le (dstl, dstl, constm1_rtx, addr));
3571 break;
3572 }
3573 }
3574
3575 if (src != CONST0_RTX (GET_MODE (src)))
3576 {
3577 dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
3578 dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
3579 }
3580
3581 if (WORDS_BIG_ENDIAN)
3582 {
3583 emit_move_insn (meml, dstl);
3584 emit_move_insn (memh, dsth);
3585 }
3586 else
3587 {
3588 /* Must store high before low for the degenerate case of an aligned address. */
3589 emit_move_insn (memh, dsth);
3590 emit_move_insn (meml, dstl);
3591 }
3592 }
3593
3594 /* The block move code tries to maximize speed by separating loads and
3595 stores at the expense of register pressure: we load all of the data
3596 before we store it back out. There are two secondary effects worth
3597 mentioning: this speeds copying to/from aligned and unaligned
3598 buffers, and it makes the code significantly easier to write. */
3599
3600 #define MAX_MOVE_WORDS 8
3601
3602 /* Load an integral number of consecutive unaligned quadwords. */
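/* The approach below reads WORDS+1 aligned quadwords that cover the
   unaligned source, then extracts each destination word from an adjacent
   pair of them with an extql/extqh-style pair, OR-ing the two halves
   together; the cmov mentioned further down papers over the fully
   aligned case.  */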
3603
3604 static void
3605 alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
3606 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3607 {
3608 rtx const im8 = GEN_INT (-8);
3609 rtx const i64 = GEN_INT (64);
3610 rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
3611 rtx sreg, areg, tmp, smema;
3612 HOST_WIDE_INT i;
3613
3614 smema = XEXP (smem, 0);
3615 if (GET_CODE (smema) == LO_SUM)
3616 smema = force_reg (Pmode, smema);
3617
3618 /* Generate all the tmp registers we need. */
3619 for (i = 0; i < words; ++i)
3620 {
3621 data_regs[i] = out_regs[i];
3622 ext_tmps[i] = gen_reg_rtx (DImode);
3623 }
3624 data_regs[words] = gen_reg_rtx (DImode);
3625
3626 if (ofs != 0)
3627 smem = adjust_address (smem, GET_MODE (smem), ofs);
3628
3629 /* Load up all of the source data. */
3630 for (i = 0; i < words; ++i)
3631 {
3632 tmp = change_address (smem, DImode,
3633 gen_rtx_AND (DImode,
3634 plus_constant (smema, 8*i),
3635 im8));
3636 set_mem_alias_set (tmp, 0);
3637 emit_move_insn (data_regs[i], tmp);
3638 }
3639
3640 tmp = change_address (smem, DImode,
3641 gen_rtx_AND (DImode,
3642 plus_constant (smema, 8*words - 1),
3643 im8));
3644 set_mem_alias_set (tmp, 0);
3645 emit_move_insn (data_regs[words], tmp);
3646
3647 /* Extract the half-word fragments. Unfortunately DEC decided to make
3648 extxh with offset zero a noop instead of zeroing the register, so
3649 we must take care of that edge condition ourselves with cmov. */
3650
3651 sreg = copy_addr_to_reg (smema);
3652 areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
3653 1, OPTAB_WIDEN);
3654 if (WORDS_BIG_ENDIAN)
3655 emit_move_insn (sreg, plus_constant (sreg, 7));
3656 for (i = 0; i < words; ++i)
3657 {
3658 if (WORDS_BIG_ENDIAN)
3659 {
3660 emit_insn (gen_extqh_be (data_regs[i], data_regs[i], sreg));
3661 emit_insn (gen_extxl_be (ext_tmps[i], data_regs[i+1], i64, sreg));
3662 }
3663 else
3664 {
3665 emit_insn (gen_extxl_le (data_regs[i], data_regs[i], i64, sreg));
3666 emit_insn (gen_extqh_le (ext_tmps[i], data_regs[i+1], sreg));
3667 }
3668 emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
3669 gen_rtx_IF_THEN_ELSE (DImode,
3670 gen_rtx_EQ (DImode, areg,
3671 const0_rtx),
3672 const0_rtx, ext_tmps[i])));
3673 }
3674
3675 /* Merge the half-words into whole words. */
3676 for (i = 0; i < words; ++i)
3677 {
3678 out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
3679 ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
3680 }
3681 }
3682
3683 /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
3684 may be NULL to store zeros. */
3685
3686 static void
3687 alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
3688 HOST_WIDE_INT words, HOST_WIDE_INT ofs)
3689 {
3690 rtx const im8 = GEN_INT (-8);
3691 rtx const i64 = GEN_INT (64);
3692 rtx ins_tmps[MAX_MOVE_WORDS];
3693 rtx st_tmp_1, st_tmp_2, dreg;
3694 rtx st_addr_1, st_addr_2, dmema;
3695 HOST_WIDE_INT i;
3696
3697 dmema = XEXP (dmem, 0);
3698 if (GET_CODE (dmema) == LO_SUM)
3699 dmema = force_reg (Pmode, dmema);
3700
3701 /* Generate all the tmp registers we need. */
3702 if (data_regs != NULL)
3703 for (i = 0; i < words; ++i)
3704 ins_tmps[i] = gen_reg_rtx(DImode);
3705 st_tmp_1 = gen_reg_rtx(DImode);
3706 st_tmp_2 = gen_reg_rtx(DImode);
3707
3708 if (ofs != 0)
3709 dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
3710
3711 st_addr_2 = change_address (dmem, DImode,
3712 gen_rtx_AND (DImode,
3713 plus_constant (dmema, words*8 - 1),
3714 im8));
3715 set_mem_alias_set (st_addr_2, 0);
3716
3717 st_addr_1 = change_address (dmem, DImode,
3718 gen_rtx_AND (DImode, dmema, im8));
3719 set_mem_alias_set (st_addr_1, 0);
3720
3721 /* Load up the destination end bits. */
3722 emit_move_insn (st_tmp_2, st_addr_2);
3723 emit_move_insn (st_tmp_1, st_addr_1);
3724
3725 /* Shift the input data into place. */
3726 dreg = copy_addr_to_reg (dmema);
3727 if (WORDS_BIG_ENDIAN)
3728 emit_move_insn (dreg, plus_constant (dreg, 7));
3729 if (data_regs != NULL)
3730 {
3731 for (i = words-1; i >= 0; --i)
3732 {
3733 if (WORDS_BIG_ENDIAN)
3734 {
3735 emit_insn (gen_insql_be (ins_tmps[i], data_regs[i], dreg));
3736 emit_insn (gen_insxh (data_regs[i], data_regs[i], i64, dreg));
3737 }
3738 else
3739 {
3740 emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
3741 emit_insn (gen_insql_le (data_regs[i], data_regs[i], dreg));
3742 }
3743 }
3744 for (i = words-1; i > 0; --i)
3745 {
3746 ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
3747 ins_tmps[i-1], ins_tmps[i-1], 1,
3748 OPTAB_WIDEN);
3749 }
3750 }
3751
3752 /* Split and merge the ends with the destination data. */
3753 if (WORDS_BIG_ENDIAN)
3754 {
3755 emit_insn (gen_mskxl_be (st_tmp_2, st_tmp_2, constm1_rtx, dreg));
3756 emit_insn (gen_mskxh (st_tmp_1, st_tmp_1, i64, dreg));
3757 }
3758 else
3759 {
3760 emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
3761 emit_insn (gen_mskxl_le (st_tmp_1, st_tmp_1, constm1_rtx, dreg));
3762 }
3763
3764 if (data_regs != NULL)
3765 {
3766 st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
3767 st_tmp_2, 1, OPTAB_WIDEN);
3768 st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
3769 st_tmp_1, 1, OPTAB_WIDEN);
3770 }
3771
3772 /* Store it all. */
3773 if (WORDS_BIG_ENDIAN)
3774 emit_move_insn (st_addr_1, st_tmp_1);
3775 else
3776 emit_move_insn (st_addr_2, st_tmp_2);
3777 for (i = words-1; i > 0; --i)
3778 {
3779 rtx tmp = change_address (dmem, DImode,
3780 gen_rtx_AND (DImode,
3781 plus_constant(dmema,
3782 WORDS_BIG_ENDIAN ? i*8-1 : i*8),
3783 im8));
3784 set_mem_alias_set (tmp, 0);
3785 emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
3786 }
3787 if (WORDS_BIG_ENDIAN)
3788 emit_move_insn (st_addr_2, st_tmp_2);
3789 else
3790 emit_move_insn (st_addr_1, st_tmp_1);
3791 }
3792
3793
3794 /* Expand string/block move operations.
3795
3796 operands[0] is the pointer to the destination.
3797 operands[1] is the pointer to the source.
3798 operands[2] is the number of bytes to move.
3799 operands[3] is the alignment. */
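/* As a rough example of the chunking below (illustrative sizes): an
   11-byte copy whose source is known to be 4-byte aligned is read as
   two SImode words, one HImode halfword and one QImode byte, all into
   registers first; the store code further down then writes them back
   out in whatever chunks the destination alignment allows.  */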
3800
3801 int
3802 alpha_expand_block_move (rtx operands[])
3803 {
3804 rtx bytes_rtx = operands[2];
3805 rtx align_rtx = operands[3];
3806 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
3807 HOST_WIDE_INT bytes = orig_bytes;
3808 HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
3809 HOST_WIDE_INT dst_align = src_align;
3810 rtx orig_src = operands[1];
3811 rtx orig_dst = operands[0];
3812 rtx data_regs[2 * MAX_MOVE_WORDS + 16];
3813 rtx tmp;
3814 unsigned int i, words, ofs, nregs = 0;
3815
3816 if (orig_bytes <= 0)
3817 return 1;
3818 else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
3819 return 0;
3820
3821 /* Look for additional alignment information from recorded register info. */
3822
3823 tmp = XEXP (orig_src, 0);
3824 if (REG_P (tmp))
3825 src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3826 else if (GET_CODE (tmp) == PLUS
3827 && REG_P (XEXP (tmp, 0))
3828 && CONST_INT_P (XEXP (tmp, 1)))
3829 {
3830 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3831 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3832
3833 if (a > src_align)
3834 {
3835 if (a >= 64 && c % 8 == 0)
3836 src_align = 64;
3837 else if (a >= 32 && c % 4 == 0)
3838 src_align = 32;
3839 else if (a >= 16 && c % 2 == 0)
3840 src_align = 16;
3841 }
3842 }
3843
3844 tmp = XEXP (orig_dst, 0);
3845 if (REG_P (tmp))
3846 dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
3847 else if (GET_CODE (tmp) == PLUS
3848 && REG_P (XEXP (tmp, 0))
3849 && CONST_INT_P (XEXP (tmp, 1)))
3850 {
3851 unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
3852 unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
3853
3854 if (a > dst_align)
3855 {
3856 if (a >= 64 && c % 8 == 0)
3857 dst_align = 64;
3858 else if (a >= 32 && c % 4 == 0)
3859 dst_align = 32;
3860 else if (a >= 16 && c % 2 == 0)
3861 dst_align = 16;
3862 }
3863 }
3864
3865 ofs = 0;
3866 if (src_align >= 64 && bytes >= 8)
3867 {
3868 words = bytes / 8;
3869
3870 for (i = 0; i < words; ++i)
3871 data_regs[nregs + i] = gen_reg_rtx (DImode);
3872
3873 for (i = 0; i < words; ++i)
3874 emit_move_insn (data_regs[nregs + i],
3875 adjust_address (orig_src, DImode, ofs + i * 8));
3876
3877 nregs += words;
3878 bytes -= words * 8;
3879 ofs += words * 8;
3880 }
3881
3882 if (src_align >= 32 && bytes >= 4)
3883 {
3884 words = bytes / 4;
3885
3886 for (i = 0; i < words; ++i)
3887 data_regs[nregs + i] = gen_reg_rtx (SImode);
3888
3889 for (i = 0; i < words; ++i)
3890 emit_move_insn (data_regs[nregs + i],
3891 adjust_address (orig_src, SImode, ofs + i * 4));
3892
3893 nregs += words;
3894 bytes -= words * 4;
3895 ofs += words * 4;
3896 }
3897
3898 if (bytes >= 8)
3899 {
3900 words = bytes / 8;
3901
3902 for (i = 0; i < words+1; ++i)
3903 data_regs[nregs + i] = gen_reg_rtx (DImode);
3904
3905 alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
3906 words, ofs);
3907
3908 nregs += words;
3909 bytes -= words * 8;
3910 ofs += words * 8;
3911 }
3912
3913 if (! TARGET_BWX && bytes >= 4)
3914 {
3915 data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
3916 alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
3917 bytes -= 4;
3918 ofs += 4;
3919 }
3920
3921 if (bytes >= 2)
3922 {
3923 if (src_align >= 16)
3924 {
3925 do {
3926 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3927 emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
3928 bytes -= 2;
3929 ofs += 2;
3930 } while (bytes >= 2);
3931 }
3932 else if (! TARGET_BWX)
3933 {
3934 data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
3935 alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
3936 bytes -= 2;
3937 ofs += 2;
3938 }
3939 }
3940
3941 while (bytes > 0)
3942 {
3943 data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
3944 emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
3945 bytes -= 1;
3946 ofs += 1;
3947 }
3948
3949 gcc_assert (nregs <= ARRAY_SIZE (data_regs));
3950
3951 /* Now save it back out again. */
3952
3953 i = 0, ofs = 0;
3954
3955 /* Write out the data in whatever chunks reading the source allowed. */
3956 if (dst_align >= 64)
3957 {
3958 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3959 {
3960 emit_move_insn (adjust_address (orig_dst, DImode, ofs),
3961 data_regs[i]);
3962 ofs += 8;
3963 i++;
3964 }
3965 }
3966
3967 if (dst_align >= 32)
3968 {
3969 /* If the source has remaining DImode regs, write them out in
3970 two pieces. */
3971 while (i < nregs && GET_MODE (data_regs[i]) == DImode)
3972 {
3973 tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
3974 NULL_RTX, 1, OPTAB_WIDEN);
3975
3976 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3977 gen_lowpart (SImode, data_regs[i]));
3978 emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
3979 gen_lowpart (SImode, tmp));
3980 ofs += 8;
3981 i++;
3982 }
3983
3984 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
3985 {
3986 emit_move_insn (adjust_address (orig_dst, SImode, ofs),
3987 data_regs[i]);
3988 ofs += 4;
3989 i++;
3990 }
3991 }
3992
3993 if (i < nregs && GET_MODE (data_regs[i]) == DImode)
3994 {
3995 /* Write out a remaining block of words using unaligned methods. */
3996
3997 for (words = 1; i + words < nregs; words++)
3998 if (GET_MODE (data_regs[i + words]) != DImode)
3999 break;
4000
4001 if (words == 1)
4002 alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
4003 else
4004 alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
4005 words, ofs);
4006
4007 i += words;
4008 ofs += words * 8;
4009 }
4010
4011 /* Due to the above, this won't be aligned. */
4012 /* ??? If we have more than one of these, consider constructing full
4013 words in registers and using alpha_expand_unaligned_store_words. */
4014 while (i < nregs && GET_MODE (data_regs[i]) == SImode)
4015 {
4016 alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
4017 ofs += 4;
4018 i++;
4019 }
4020
4021 if (dst_align >= 16)
4022 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4023 {
4024 emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
4025 i++;
4026 ofs += 2;
4027 }
4028 else
4029 while (i < nregs && GET_MODE (data_regs[i]) == HImode)
4030 {
4031 alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
4032 i++;
4033 ofs += 2;
4034 }
4035
4036 /* The remainder must be byte copies. */
4037 while (i < nregs)
4038 {
4039 gcc_assert (GET_MODE (data_regs[i]) == QImode);
4040 emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
4041 i++;
4042 ofs += 1;
4043 }
4044
4045 return 1;
4046 }
4047
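/* Expand a block clear: store operands[1] zero bytes to the memory given by
   operands[0], with alignment operands[3].  Returns 1 if the clear was
   expanded inline, 0 if nothing was emitted and the caller must handle it
   some other way.  */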
4048 int
4049 alpha_expand_block_clear (rtx operands[])
4050 {
4051 rtx bytes_rtx = operands[1];
4052 rtx align_rtx = operands[3];
4053 HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
4054 HOST_WIDE_INT bytes = orig_bytes;
4055 HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
4056 HOST_WIDE_INT alignofs = 0;
4057 rtx orig_dst = operands[0];
4058 rtx tmp;
4059 int i, words, ofs = 0;
4060
4061 if (orig_bytes <= 0)
4062 return 1;
4063 if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
4064 return 0;
4065
4066 /* Look for stricter alignment. */
4067 tmp = XEXP (orig_dst, 0);
4068 if (REG_P (tmp))
4069 align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
4070 else if (GET_CODE (tmp) == PLUS
4071 && REG_P (XEXP (tmp, 0))
4072 && CONST_INT_P (XEXP (tmp, 1)))
4073 {
4074 HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
4075 int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
4076
4077 if (a > align)
4078 {
4079 if (a >= 64)
4080 align = a, alignofs = 8 - c % 8;
4081 else if (a >= 32)
4082 align = a, alignofs = 4 - c % 4;
4083 else if (a >= 16)
4084 align = a, alignofs = 2 - c % 2;
4085 }
4086 }
4087
4088 /* Handle an unaligned prefix first. */
4089
4090 if (alignofs > 0)
4091 {
4092 #if HOST_BITS_PER_WIDE_INT >= 64
4093 /* Given that alignofs is bounded by align, the only time BWX could
4094 generate three stores is for a 7 byte fill. Prefer two individual
4095 stores over a load/mask/store sequence. */
4096 if ((!TARGET_BWX || alignofs == 7)
4097 && align >= 32
4098 && !(alignofs == 4 && bytes >= 4))
4099 {
4100 enum machine_mode mode = (align >= 64 ? DImode : SImode);
4101 int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
4102 rtx mem, tmp;
4103 HOST_WIDE_INT mask;
4104
4105 mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
4106 set_mem_alias_set (mem, 0);
4107
4108 mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
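	  /* E.g. with ALIGN >= 64 and ALIGNOFS == 3, INV_ALIGNOFS is 5 and
	     MASK is 0x000000ffffffffff, so the five bytes preceding the
	     block are preserved and the three prefix bytes are cleared.  */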
4109 if (bytes < alignofs)
4110 {
4111 mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
4112 ofs += bytes;
4113 bytes = 0;
4114 }
4115 else
4116 {
4117 bytes -= alignofs;
4118 ofs += alignofs;
4119 }
4120 alignofs = 0;
4121
4122 tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
4123 NULL_RTX, 1, OPTAB_WIDEN);
4124
4125 emit_move_insn (mem, tmp);
4126 }
4127 #endif
4128
4129 if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
4130 {
4131 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4132 bytes -= 1;
4133 ofs += 1;
4134 alignofs -= 1;
4135 }
4136 if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
4137 {
4138 emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
4139 bytes -= 2;
4140 ofs += 2;
4141 alignofs -= 2;
4142 }
4143 if (alignofs == 4 && bytes >= 4)
4144 {
4145 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4146 bytes -= 4;
4147 ofs += 4;
4148 alignofs = 0;
4149 }
4150
4151 /* If we've not used the extra lead alignment information by now,
4152 we won't be able to. Downgrade align to match what's left over. */
4153 if (alignofs > 0)
4154 {
4155 alignofs = alignofs & -alignofs;
4156 align = MIN (align, alignofs * BITS_PER_UNIT);
4157 }
4158 }
4159
4160 /* Handle a block of contiguous long-words. */
4161
4162 if (align >= 64 && bytes >= 8)
4163 {
4164 words = bytes / 8;
4165
4166 for (i = 0; i < words; ++i)
4167 emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
4168 const0_rtx);
4169
4170 bytes -= words * 8;
4171 ofs += words * 8;
4172 }
4173
4174 /* If the block is large and appropriately aligned, emit a single
4175 store followed by a sequence of stq_u insns. */
4176
4177 if (align >= 32 && bytes > 16)
4178 {
4179 rtx orig_dsta;
4180
4181 emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
4182 bytes -= 4;
4183 ofs += 4;
4184
4185 orig_dsta = XEXP (orig_dst, 0);
4186 if (GET_CODE (orig_dsta) == LO_SUM)
4187 orig_dsta = force_reg (Pmode, orig_dsta);
4188
4189 words = bytes / 8;
4190 for (i = 0; i < words; ++i)
4191 {
4192 rtx mem
4193 = change_address (orig_dst, DImode,
4194 gen_rtx_AND (DImode,
4195 plus_constant (orig_dsta, ofs + i*8),
4196 GEN_INT (-8)));
4197 set_mem_alias_set (mem, 0);
4198 emit_move_insn (mem, const0_rtx);
4199 }
4200
4201 /* Depending on the alignment, the first stq_u may have overlapped
4202 with the initial stl, which means that the last stq_u didn't
4203 write as much as it would appear. Leave those questionable bytes
4204 unaccounted for. */
4205 bytes -= words * 8 - 4;
4206 ofs += words * 8 - 4;
4207 }
4208
4209 /* Handle a smaller block of aligned words. */
4210
4211 if ((align >= 64 && bytes == 4)
4212 || (align == 32 && bytes >= 4))
4213 {
4214 words = bytes / 4;
4215
4216 for (i = 0; i < words; ++i)
4217 emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
4218 const0_rtx);
4219
4220 bytes -= words * 4;
4221 ofs += words * 4;
4222 }
4223
4224 /* An unaligned block uses stq_u stores for as many as possible. */
4225
4226 if (bytes >= 8)
4227 {
4228 words = bytes / 8;
4229
4230 alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
4231
4232 bytes -= words * 8;
4233 ofs += words * 8;
4234 }
4235
4236 /* Next clean up any trailing pieces. */
4237
4238 #if HOST_BITS_PER_WIDE_INT >= 64
4239 /* Count the number of bits in BYTES for which aligned stores could
4240 be emitted. */
4241 words = 0;
4242 for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
4243 if (bytes & i)
4244 words += 1;
4245
4246 /* If we have appropriate alignment (and it wouldn't take too many
4247 instructions otherwise), mask out the bytes we need. */
4248 if (TARGET_BWX ? words > 2 : bytes > 0)
4249 {
4250 if (align >= 64)
4251 {
4252 rtx mem, tmp;
4253 HOST_WIDE_INT mask;
4254
4255 mem = adjust_address (orig_dst, DImode, ofs);
4256 set_mem_alias_set (mem, 0);
4257
4258 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
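	  /* E.g. BYTES == 3 gives MASK == 0xffffffffff000000, clearing the
	     three trailing bytes at OFS while preserving the rest of the
	     quadword.  */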
4259
4260 tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
4261 NULL_RTX, 1, OPTAB_WIDEN);
4262
4263 emit_move_insn (mem, tmp);
4264 return 1;
4265 }
4266 else if (align >= 32 && bytes < 4)
4267 {
4268 rtx mem, tmp;
4269 HOST_WIDE_INT mask;
4270
4271 mem = adjust_address (orig_dst, SImode, ofs);
4272 set_mem_alias_set (mem, 0);
4273
4274 mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
4275
4276 tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
4277 NULL_RTX, 1, OPTAB_WIDEN);
4278
4279 emit_move_insn (mem, tmp);
4280 return 1;
4281 }
4282 }
4283 #endif
4284
4285 if (!TARGET_BWX && bytes >= 4)
4286 {
4287 alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
4288 bytes -= 4;
4289 ofs += 4;
4290 }
4291
4292 if (bytes >= 2)
4293 {
4294 if (align >= 16)
4295 {
4296 do {
4297 emit_move_insn (adjust_address (orig_dst, HImode, ofs),
4298 const0_rtx);
4299 bytes -= 2;
4300 ofs += 2;
4301 } while (bytes >= 2);
4302 }
4303 else if (! TARGET_BWX)
4304 {
4305 alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
4306 bytes -= 2;
4307 ofs += 2;
4308 }
4309 }
4310
4311 while (bytes > 0)
4312 {
4313 emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
4314 bytes -= 1;
4315 ofs += 1;
4316 }
4317
4318 return 1;
4319 }
4320
4321 /* Returns a mask so that zap(x, value) == x & mask. */
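/* For example, VALUE == 0x0f selects bytes 0-3 for zapping, so the returned
   mask is 0xffffffff00000000 and zap(x, 0x0f) == x & 0xffffffff00000000.  */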
4322
4323 rtx
4324 alpha_expand_zap_mask (HOST_WIDE_INT value)
4325 {
4326 rtx result;
4327 int i;
4328
4329 if (HOST_BITS_PER_WIDE_INT >= 64)
4330 {
4331 HOST_WIDE_INT mask = 0;
4332
4333 for (i = 7; i >= 0; --i)
4334 {
4335 mask <<= 8;
4336 if (!((value >> i) & 1))
4337 mask |= 0xff;
4338 }
4339
4340 result = gen_int_mode (mask, DImode);
4341 }
4342 else
4343 {
4344 HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
4345
4346 gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
4347
4348 for (i = 7; i >= 4; --i)
4349 {
4350 mask_hi <<= 8;
4351 if (!((value >> i) & 1))
4352 mask_hi |= 0xff;
4353 }
4354
4355 for (i = 3; i >= 0; --i)
4356 {
4357 mask_lo <<= 8;
4358 if (!((value >> i) & 1))
4359 mask_lo |= 0xff;
4360 }
4361
4362 result = immed_double_const (mask_lo, mask_hi, DImode);
4363 }
4364
4365 return result;
4366 }
4367
4368 void
4369 alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
4370 enum machine_mode mode,
4371 rtx op0, rtx op1, rtx op2)
4372 {
4373 op0 = gen_lowpart (mode, op0);
4374
4375 if (op1 == const0_rtx)
4376 op1 = CONST0_RTX (mode);
4377 else
4378 op1 = gen_lowpart (mode, op1);
4379
4380 if (op2 == const0_rtx)
4381 op2 = CONST0_RTX (mode);
4382 else
4383 op2 = gen_lowpart (mode, op2);
4384
4385 emit_insn ((*gen) (op0, op1, op2));
4386 }
4387
4388 /* A subroutine of the atomic operation splitters. Jump to LABEL if
4389 COND is true. Mark the jump as unlikely to be taken. */
4390
4391 static void
4392 emit_unlikely_jump (rtx cond, rtx label)
4393 {
4394 rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
4395 rtx x;
4396
4397 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
4398 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
4399 add_reg_note (x, REG_BR_PROB, very_unlikely);
4400 }
4401
4402 /* A subroutine of the atomic operation splitters. Emit a load-locked
4403 instruction in MODE. */
4404
4405 static void
4406 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
4407 {
4408 rtx (*fn) (rtx, rtx) = NULL;
4409 if (mode == SImode)
4410 fn = gen_load_locked_si;
4411 else if (mode == DImode)
4412 fn = gen_load_locked_di;
4413 emit_insn (fn (reg, mem));
4414 }
4415
4416 /* A subroutine of the atomic operation splitters. Emit a store-conditional
4417 instruction in MODE. */
4418
4419 static void
4420 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
4421 {
4422 rtx (*fn) (rtx, rtx, rtx) = NULL;
4423 if (mode == SImode)
4424 fn = gen_store_conditional_si;
4425 else if (mode == DImode)
4426 fn = gen_store_conditional_di;
4427 emit_insn (fn (res, mem, val));
4428 }
4429
4430 /* A subroutine of the atomic operation splitters. Emit an insxl
4431 instruction in MODE. */
4432
4433 static rtx
4434 emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
4435 {
4436 rtx ret = gen_reg_rtx (DImode);
4437 rtx (*fn) (rtx, rtx, rtx);
4438
4439 if (WORDS_BIG_ENDIAN)
4440 {
4441 if (mode == QImode)
4442 fn = gen_insbl_be;
4443 else
4444 fn = gen_inswl_be;
4445 }
4446 else
4447 {
4448 if (mode == QImode)
4449 fn = gen_insbl_le;
4450 else
4451 fn = gen_inswl_le;
4452 }
4453 /* The insbl and inswl patterns require a register operand. */
4454 op1 = force_reg (mode, op1);
4455 emit_insn (fn (ret, op1, op2));
4456
4457 return ret;
4458 }
4459
4460 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
4461 to perform. MEM is the memory on which to operate. VAL is the second
4462 operand of the binary operator. BEFORE and AFTER are optional locations to
4463 return the value of MEM either before or after the operation. SCRATCH is
4464 a scratch register. */
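/* A rough sketch of the sequence emitted below (DImode case):

	mb
     1:	ldq_l	before,0(mem)
	<op>	before,val,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb  */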
4465
4466 void
4467 alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
4468 rtx before, rtx after, rtx scratch)
4469 {
4470 enum machine_mode mode = GET_MODE (mem);
4471 rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
4472
4473 emit_insn (gen_memory_barrier ());
4474
4475 label = gen_label_rtx ();
4476 emit_label (label);
4477 label = gen_rtx_LABEL_REF (DImode, label);
4478
4479 if (before == NULL)
4480 before = scratch;
4481 emit_load_locked (mode, before, mem);
4482
4483 if (code == NOT)
4484 {
4485 x = gen_rtx_AND (mode, before, val);
4486 emit_insn (gen_rtx_SET (VOIDmode, val, x));
4487
4488 x = gen_rtx_NOT (mode, val);
4489 }
4490 else
4491 x = gen_rtx_fmt_ee (code, mode, before, val);
4492 if (after)
4493 emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
4494 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
4495
4496 emit_store_conditional (mode, cond, mem, scratch);
4497
4498 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4499 emit_unlikely_jump (x, label);
4500
4501 emit_insn (gen_memory_barrier ());
4502 }
4503
4504 /* Expand a compare and swap operation. */
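/* Roughly, the generated sequence is:

	mb
     1:	ldq_l	retval,0(mem)
	cmpeq	retval,oldval,cond
	beq	cond,2f
	mov	newval,scratch
	stq_c	scratch,0(mem)
	beq	scratch,1b
	mb
     2:  */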
4505
4506 void
4507 alpha_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
4508 rtx scratch)
4509 {
4510 enum machine_mode mode = GET_MODE (mem);
4511 rtx label1, label2, x, cond = gen_lowpart (DImode, scratch);
4512
4513 emit_insn (gen_memory_barrier ());
4514
4515 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4516 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4517 emit_label (XEXP (label1, 0));
4518
4519 emit_load_locked (mode, retval, mem);
4520
4521 x = gen_lowpart (DImode, retval);
4522 if (oldval == const0_rtx)
4523 x = gen_rtx_NE (DImode, x, const0_rtx);
4524 else
4525 {
4526 x = gen_rtx_EQ (DImode, x, oldval);
4527 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4528 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4529 }
4530 emit_unlikely_jump (x, label2);
4531
4532 emit_move_insn (scratch, newval);
4533 emit_store_conditional (mode, cond, mem, scratch);
4534
4535 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4536 emit_unlikely_jump (x, label1);
4537
4538 emit_insn (gen_memory_barrier ());
4539 emit_label (XEXP (label2, 0));
4540 }
4541
4542 void
4543 alpha_expand_compare_and_swap_12 (rtx dst, rtx mem, rtx oldval, rtx newval)
4544 {
4545 enum machine_mode mode = GET_MODE (mem);
4546 rtx addr, align, wdst;
4547 rtx (*fn5) (rtx, rtx, rtx, rtx, rtx);
4548
4549 addr = force_reg (DImode, XEXP (mem, 0));
4550 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4551 NULL_RTX, 1, OPTAB_DIRECT);
4552
4553 oldval = convert_modes (DImode, mode, oldval, 1);
4554 newval = emit_insxl (mode, newval, addr);
4555
4556 wdst = gen_reg_rtx (DImode);
4557 if (mode == QImode)
4558 fn5 = gen_sync_compare_and_swapqi_1;
4559 else
4560 fn5 = gen_sync_compare_and_swaphi_1;
4561 emit_insn (fn5 (wdst, addr, oldval, newval, align));
4562
4563 emit_move_insn (dst, gen_lowpart (mode, wdst));
4564 }
4565
4566 void
4567 alpha_split_compare_and_swap_12 (enum machine_mode mode, rtx dest, rtx addr,
4568 rtx oldval, rtx newval, rtx align,
4569 rtx scratch, rtx cond)
4570 {
4571 rtx label1, label2, mem, width, mask, x;
4572
4573 mem = gen_rtx_MEM (DImode, align);
4574 MEM_VOLATILE_P (mem) = 1;
4575
4576 emit_insn (gen_memory_barrier ());
4577 label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4578 label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4579 emit_label (XEXP (label1, 0));
4580
4581 emit_load_locked (DImode, scratch, mem);
4582
4583 width = GEN_INT (GET_MODE_BITSIZE (mode));
4584 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4585 if (WORDS_BIG_ENDIAN)
4586 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4587 else
4588 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4589
4590 if (oldval == const0_rtx)
4591 x = gen_rtx_NE (DImode, dest, const0_rtx);
4592 else
4593 {
4594 x = gen_rtx_EQ (DImode, dest, oldval);
4595 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
4596 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4597 }
4598 emit_unlikely_jump (x, label2);
4599
4600 if (WORDS_BIG_ENDIAN)
4601 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4602 else
4603 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4604 emit_insn (gen_iordi3 (scratch, scratch, newval));
4605
4606 emit_store_conditional (DImode, scratch, mem, scratch);
4607
4608 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4609 emit_unlikely_jump (x, label1);
4610
4611 emit_insn (gen_memory_barrier ());
4612 emit_label (XEXP (label2, 0));
4613 }
4614
4615 /* Expand an atomic exchange operation. */
4616
4617 void
4618 alpha_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
4619 {
4620 enum machine_mode mode = GET_MODE (mem);
4621 rtx label, x, cond = gen_lowpart (DImode, scratch);
4622
4623 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4624 emit_label (XEXP (label, 0));
4625
4626 emit_load_locked (mode, retval, mem);
4627 emit_move_insn (scratch, val);
4628 emit_store_conditional (mode, cond, mem, scratch);
4629
4630 x = gen_rtx_EQ (DImode, cond, const0_rtx);
4631 emit_unlikely_jump (x, label);
4632
4633 emit_insn (gen_memory_barrier ());
4634 }
4635
4636 void
4637 alpha_expand_lock_test_and_set_12 (rtx dst, rtx mem, rtx val)
4638 {
4639 enum machine_mode mode = GET_MODE (mem);
4640 rtx addr, align, wdst;
4641 rtx (*fn4) (rtx, rtx, rtx, rtx);
4642
4643 /* Force the address into a register. */
4644 addr = force_reg (DImode, XEXP (mem, 0));
4645
4646 /* Align it to a multiple of 8. */
4647 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
4648 NULL_RTX, 1, OPTAB_DIRECT);
4649
4650 /* Insert val into the correct byte location within the word. */
4651 val = emit_insxl (mode, val, addr);
4652
4653 wdst = gen_reg_rtx (DImode);
4654 if (mode == QImode)
4655 fn4 = gen_sync_lock_test_and_setqi_1;
4656 else
4657 fn4 = gen_sync_lock_test_and_sethi_1;
4658 emit_insn (fn4 (wdst, addr, val, align));
4659
4660 emit_move_insn (dst, gen_lowpart (mode, wdst));
4661 }
4662
4663 void
4664 alpha_split_lock_test_and_set_12 (enum machine_mode mode, rtx dest, rtx addr,
4665 rtx val, rtx align, rtx scratch)
4666 {
4667 rtx label, mem, width, mask, x;
4668
4669 mem = gen_rtx_MEM (DImode, align);
4670 MEM_VOLATILE_P (mem) = 1;
4671
4672 label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
4673 emit_label (XEXP (label, 0));
4674
4675 emit_load_locked (DImode, scratch, mem);
4676
4677 width = GEN_INT (GET_MODE_BITSIZE (mode));
4678 mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
4679 if (WORDS_BIG_ENDIAN)
4680 {
4681 emit_insn (gen_extxl_be (dest, scratch, width, addr));
4682 emit_insn (gen_mskxl_be (scratch, scratch, mask, addr));
4683 }
4684 else
4685 {
4686 emit_insn (gen_extxl_le (dest, scratch, width, addr));
4687 emit_insn (gen_mskxl_le (scratch, scratch, mask, addr));
4688 }
4689 emit_insn (gen_iordi3 (scratch, scratch, val));
4690
4691 emit_store_conditional (DImode, scratch, mem, scratch);
4692
4693 x = gen_rtx_EQ (DImode, scratch, const0_rtx);
4694 emit_unlikely_jump (x, label);
4695
4696 emit_insn (gen_memory_barrier ());
4697 }
4698 \f
4699 /* Adjust the cost of a scheduling dependency. Return the new cost of
4700 the dependency of INSN on DEP_INSN described by LINK. COST is the current cost. */
4701
4702 static int
4703 alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4704 {
4705 enum attr_type insn_type, dep_insn_type;
4706
4707 /* If the dependence is an anti-dependence, there is no cost. For an
4708 output dependence, there is sometimes a cost, but it doesn't seem
4709 worth handling those few cases. */
4710 if (REG_NOTE_KIND (link) != 0)
4711 return cost;
4712
4713 /* If we can't recognize the insns, we can't really do anything. */
4714 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
4715 return cost;
4716
4717 insn_type = get_attr_type (insn);
4718 dep_insn_type = get_attr_type (dep_insn);
4719
4720 /* Bring in the user-defined memory latency. */
4721 if (dep_insn_type == TYPE_ILD
4722 || dep_insn_type == TYPE_FLD
4723 || dep_insn_type == TYPE_LDSYM)
4724 cost += alpha_memory_latency-1;
4725
4726 /* Everything else handled in DFA bypasses now. */
4727
4728 return cost;
4729 }
4730
4731 /* The number of instructions that can be issued per cycle. */
4732
4733 static int
4734 alpha_issue_rate (void)
4735 {
4736 return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
4737 }
4738
4739 /* How many alternative schedules to try. This should be as wide as the
4740 scheduling freedom in the DFA, but no wider. Making this value too
4741 large results in extra work for the scheduler.
4742
4743 For EV4, loads can be issued to either IB0 or IB1, thus we have 2
4744 alternative schedules. For EV5, we can choose between E0/E1 and
4745 FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
4746
4747 static int
4748 alpha_multipass_dfa_lookahead (void)
4749 {
4750 return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
4751 }
4752 \f
4753 /* Machine-specific function data. */
4754
4755 struct GTY(()) machine_function
4756 {
4757 /* For unicosmk. */
4758 /* List of call information words for calls from this function. */
4759 struct rtx_def *first_ciw;
4760 struct rtx_def *last_ciw;
4761 int ciw_count;
4762
4763 /* List of deferred case vectors. */
4764 struct rtx_def *addr_list;
4765
4766 /* For OSF. */
4767 const char *some_ld_name;
4768
4769 /* For TARGET_LD_BUGGY_LDGP. */
4770 struct rtx_def *gp_save_rtx;
4771 };
4772
4773 /* How to allocate a 'struct machine_function'. */
4774
4775 static struct machine_function *
4776 alpha_init_machine_status (void)
4777 {
4778 return ((struct machine_function *)
4779 ggc_alloc_cleared (sizeof (struct machine_function)));
4780 }
4781
4782 /* Functions to save and restore alpha_return_addr_rtx. */
4783
4784 /* Start the ball rolling with RETURN_ADDR_RTX. */
4785
4786 rtx
4787 alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
4788 {
4789 if (count != 0)
4790 return const0_rtx;
4791
4792 return get_hard_reg_initial_val (Pmode, REG_RA);
4793 }
4794
4795 /* Return or create a memory slot containing the gp value for the current
4796 function. Needed only if TARGET_LD_BUGGY_LDGP. */
4797
4798 rtx
4799 alpha_gp_save_rtx (void)
4800 {
4801 rtx seq, m = cfun->machine->gp_save_rtx;
4802
4803 if (m == NULL)
4804 {
4805 start_sequence ();
4806
4807 m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
4808 m = validize_mem (m);
4809 emit_move_insn (m, pic_offset_table_rtx);
4810
4811 seq = get_insns ();
4812 end_sequence ();
4813
4814 /* We used to simply emit the sequence after entry_of_function.
4815 However, this breaks the CFG if the first instruction in the
4816 first block is not the NOTE_INSN_BASIC_BLOCK, for example a
4817 label. Emit the sequence properly on the edge. We are only
4818 invoked from dw2_build_landing_pads and finish_eh_generation
4819 will call commit_edge_insertions thanks to a kludge. */
4820 insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
4821
4822 cfun->machine->gp_save_rtx = m;
4823 }
4824
4825 return m;
4826 }
4827
4828 static int
4829 alpha_ra_ever_killed (void)
4830 {
4831 rtx top;
4832
4833 if (!has_hard_reg_initial_val (Pmode, REG_RA))
4834 return (int)df_regs_ever_live_p (REG_RA);
4835
4836 push_topmost_sequence ();
4837 top = get_insns ();
4838 pop_topmost_sequence ();
4839
4840 return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
4841 }
4842
4843 \f
4844 /* Return the trap mode suffix applicable to the current
4845 instruction, or NULL. */
4846
4847 static const char *
4848 get_trap_mode_suffix (void)
4849 {
4850 enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
4851
4852 switch (s)
4853 {
4854 case TRAP_SUFFIX_NONE:
4855 return NULL;
4856
4857 case TRAP_SUFFIX_SU:
4858 if (alpha_fptm >= ALPHA_FPTM_SU)
4859 return "su";
4860 return NULL;
4861
4862 case TRAP_SUFFIX_SUI:
4863 if (alpha_fptm >= ALPHA_FPTM_SUI)
4864 return "sui";
4865 return NULL;
4866
4867 case TRAP_SUFFIX_V_SV:
4868 switch (alpha_fptm)
4869 {
4870 case ALPHA_FPTM_N:
4871 return NULL;
4872 case ALPHA_FPTM_U:
4873 return "v";
4874 case ALPHA_FPTM_SU:
4875 case ALPHA_FPTM_SUI:
4876 return "sv";
4877 default:
4878 gcc_unreachable ();
4879 }
4880
4881 case TRAP_SUFFIX_V_SV_SVI:
4882 switch (alpha_fptm)
4883 {
4884 case ALPHA_FPTM_N:
4885 return NULL;
4886 case ALPHA_FPTM_U:
4887 return "v";
4888 case ALPHA_FPTM_SU:
4889 return "sv";
4890 case ALPHA_FPTM_SUI:
4891 return "svi";
4892 default:
4893 gcc_unreachable ();
4894 }
4895 break;
4896
4897 case TRAP_SUFFIX_U_SU_SUI:
4898 switch (alpha_fptm)
4899 {
4900 case ALPHA_FPTM_N:
4901 return NULL;
4902 case ALPHA_FPTM_U:
4903 return "u";
4904 case ALPHA_FPTM_SU:
4905 return "su";
4906 case ALPHA_FPTM_SUI:
4907 return "sui";
4908 default:
4909 gcc_unreachable ();
4910 }
4911 break;
4912
4913 default:
4914 gcc_unreachable ();
4915 }
4916 gcc_unreachable ();
4917 }
4918
4919 /* Return the rounding mode suffix applicable to the current
4920 instruction, or NULL. */
4921
4922 static const char *
4923 get_round_mode_suffix (void)
4924 {
4925 enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
4926
4927 switch (s)
4928 {
4929 case ROUND_SUFFIX_NONE:
4930 return NULL;
4931 case ROUND_SUFFIX_NORMAL:
4932 switch (alpha_fprm)
4933 {
4934 case ALPHA_FPRM_NORM:
4935 return NULL;
4936 case ALPHA_FPRM_MINF:
4937 return "m";
4938 case ALPHA_FPRM_CHOP:
4939 return "c";
4940 case ALPHA_FPRM_DYN:
4941 return "d";
4942 default:
4943 gcc_unreachable ();
4944 }
4945 break;
4946
4947 case ROUND_SUFFIX_C:
4948 return "c";
4949
4950 default:
4951 gcc_unreachable ();
4952 }
4953 gcc_unreachable ();
4954 }
4955
4956 /* Locate some local-dynamic symbol still in use by this function
4957 so that we can print its name in some movdi_er_tlsldm pattern. */
4958
4959 static int
4960 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
4961 {
4962 rtx x = *px;
4963
4964 if (GET_CODE (x) == SYMBOL_REF
4965 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
4966 {
4967 cfun->machine->some_ld_name = XSTR (x, 0);
4968 return 1;
4969 }
4970
4971 return 0;
4972 }
4973
4974 static const char *
4975 get_some_local_dynamic_name (void)
4976 {
4977 rtx insn;
4978
4979 if (cfun->machine->some_ld_name)
4980 return cfun->machine->some_ld_name;
4981
4982 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
4983 if (INSN_P (insn)
4984 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
4985 return cfun->machine->some_ld_name;
4986
4987 gcc_unreachable ();
4988 }
4989
4990 /* Print an operand. Recognize special options, documented below. */
4991
4992 void
4993 print_operand (FILE *file, rtx x, int code)
4994 {
4995 int i;
4996
4997 switch (code)
4998 {
4999 case '~':
5000 /* Print the assembler name of the current function. */
5001 assemble_name (file, alpha_fnname);
5002 break;
5003
5004 case '&':
5005 assemble_name (file, get_some_local_dynamic_name ());
5006 break;
5007
5008 case '/':
5009 {
5010 const char *trap = get_trap_mode_suffix ();
5011 const char *round = get_round_mode_suffix ();
5012
5013 if (trap || round)
5014 fprintf (file, (TARGET_AS_SLASH_BEFORE_SUFFIX ? "/%s%s" : "%s%s"),
5015 (trap ? trap : ""), (round ? round : ""));
5016 break;
5017 }
5018
5019 case ',':
5020 /* Generates single precision instruction suffix. */
5021 fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
5022 break;
5023
5024 case '-':
5025 /* Generates double precision instruction suffix. */
5026 fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
5027 break;
5028
5029 case '#':
5030 if (alpha_this_literal_sequence_number == 0)
5031 alpha_this_literal_sequence_number = alpha_next_sequence_number++;
5032 fprintf (file, "%d", alpha_this_literal_sequence_number);
5033 break;
5034
5035 case '*':
5036 if (alpha_this_gpdisp_sequence_number == 0)
5037 alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
5038 fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
5039 break;
5040
5041 case 'H':
5042 if (GET_CODE (x) == HIGH)
5043 output_addr_const (file, XEXP (x, 0));
5044 else
5045 output_operand_lossage ("invalid %%H value");
5046 break;
5047
5048 case 'J':
5049 {
5050 const char *lituse;
5051
5052 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
5053 {
5054 x = XVECEXP (x, 0, 0);
5055 lituse = "lituse_tlsgd";
5056 }
5057 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
5058 {
5059 x = XVECEXP (x, 0, 0);
5060 lituse = "lituse_tlsldm";
5061 }
5062 else if (CONST_INT_P (x))
5063 lituse = "lituse_jsr";
5064 else
5065 {
5066 output_operand_lossage ("invalid %%J value");
5067 break;
5068 }
5069
5070 if (x != const0_rtx)
5071 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5072 }
5073 break;
5074
5075 case 'j':
5076 {
5077 const char *lituse;
5078
5079 #ifdef HAVE_AS_JSRDIRECT_RELOCS
5080 lituse = "lituse_jsrdirect";
5081 #else
5082 lituse = "lituse_jsr";
5083 #endif
5084
5085 gcc_assert (INTVAL (x) != 0);
5086 fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
5087 }
5088 break;
5089 case 'r':
5090 /* If this operand is the constant zero, write it as "$31". */
5091 if (REG_P (x))
5092 fprintf (file, "%s", reg_names[REGNO (x)]);
5093 else if (x == CONST0_RTX (GET_MODE (x)))
5094 fprintf (file, "$31");
5095 else
5096 output_operand_lossage ("invalid %%r value");
5097 break;
5098
5099 case 'R':
5100 /* Similar, but for floating-point. */
5101 if (REG_P (x))
5102 fprintf (file, "%s", reg_names[REGNO (x)]);
5103 else if (x == CONST0_RTX (GET_MODE (x)))
5104 fprintf (file, "$f31");
5105 else
5106 output_operand_lossage ("invalid %%R value");
5107 break;
5108
5109 case 'N':
5110 /* Write the 1's complement of a constant. */
5111 if (!CONST_INT_P (x))
5112 output_operand_lossage ("invalid %%N value");
5113
5114 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
5115 break;
5116
5117 case 'P':
5118 /* Write 1 << C, for a constant C. */
5119 if (!CONST_INT_P (x))
5120 output_operand_lossage ("invalid %%P value");
5121
5122 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
5123 break;
5124
5125 case 'h':
5126 /* Write the high-order 16 bits of a constant, sign-extended. */
5127 if (!CONST_INT_P (x))
5128 output_operand_lossage ("invalid %%h value");
5129
5130 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
5131 break;
5132
5133 case 'L':
5134 /* Write the low-order 16 bits of a constant, sign-extended. */
5135 if (!CONST_INT_P (x))
5136 output_operand_lossage ("invalid %%L value");
5137
5138 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5139 (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
5140 break;
5141
5142 case 'm':
5143 /* Write mask for ZAP insn. */
5144 if (GET_CODE (x) == CONST_DOUBLE)
5145 {
5146 HOST_WIDE_INT mask = 0;
5147 HOST_WIDE_INT value;
5148
5149 value = CONST_DOUBLE_LOW (x);
5150 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5151 i++, value >>= 8)
5152 if (value & 0xff)
5153 mask |= (1 << i);
5154
5155 value = CONST_DOUBLE_HIGH (x);
5156 for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
5157 i++, value >>= 8)
5158 if (value & 0xff)
5159 mask |= (1 << (i + sizeof (int)));
5160
5161 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
5162 }
5163
5164 else if (CONST_INT_P (x))
5165 {
5166 HOST_WIDE_INT mask = 0, value = INTVAL (x);
5167
5168 for (i = 0; i < 8; i++, value >>= 8)
5169 if (value & 0xff)
5170 mask |= (1 << i);
5171
5172 fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
5173 }
5174 else
5175 output_operand_lossage ("invalid %%m value");
5176 break;
5177
5178 case 'M':
5179 /* 'b', 'w', 'l', or 'q' as the value of the constant. */
5180 if (!CONST_INT_P (x)
5181 || (INTVAL (x) != 8 && INTVAL (x) != 16
5182 && INTVAL (x) != 32 && INTVAL (x) != 64))
5183 output_operand_lossage ("invalid %%M value");
5184
5185 fprintf (file, "%s",
5186 (INTVAL (x) == 8 ? "b"
5187 : INTVAL (x) == 16 ? "w"
5188 : INTVAL (x) == 32 ? "l"
5189 : "q"));
5190 break;
5191
5192 case 'U':
5193 /* Similar, except do it from the mask. */
5194 if (CONST_INT_P (x))
5195 {
5196 HOST_WIDE_INT value = INTVAL (x);
5197
5198 if (value == 0xff)
5199 {
5200 fputc ('b', file);
5201 break;
5202 }
5203 if (value == 0xffff)
5204 {
5205 fputc ('w', file);
5206 break;
5207 }
5208 if (value == 0xffffffff)
5209 {
5210 fputc ('l', file);
5211 break;
5212 }
5213 if (value == -1)
5214 {
5215 fputc ('q', file);
5216 break;
5217 }
5218 }
5219 else if (HOST_BITS_PER_WIDE_INT == 32
5220 && GET_CODE (x) == CONST_DOUBLE
5221 && CONST_DOUBLE_LOW (x) == 0xffffffff
5222 && CONST_DOUBLE_HIGH (x) == 0)
5223 {
5224 fputc ('l', file);
5225 break;
5226 }
5227 output_operand_lossage ("invalid %%U value");
5228 break;
5229
5230 case 's':
5231 /* Write the constant value divided by 8 for little-endian mode or
5232 (56 - value) / 8 for big-endian mode. */
5233
5234 if (!CONST_INT_P (x)
5235 || (unsigned HOST_WIDE_INT) INTVAL (x) >= (WORDS_BIG_ENDIAN
5236 ? 56
5237 : 64)
5238 || (INTVAL (x) & 7) != 0)
5239 output_operand_lossage ("invalid %%s value");
5240
5241 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
5242 WORDS_BIG_ENDIAN
5243 ? (56 - INTVAL (x)) / 8
5244 : INTVAL (x) / 8);
5245 break;
5246
5247 case 'S':
5248 /* Same, except compute (64 - c) / 8. */
5249
5250 if (!CONST_INT_P (x)
5251 || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
5252 || (INTVAL (x) & 7) != 0)
5253 output_operand_lossage ("invalid %%S value");
5254
5255 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
5256 break;
5257
5258 case 't':
5259 {
5260 /* On Unicos/Mk systems: use a DEX expression if the symbol
5261 clashes with a register name. */
5262 int dex = unicosmk_need_dex (x);
5263 if (dex)
5264 fprintf (file, "DEX(%d)", dex);
5265 else
5266 output_addr_const (file, x);
5267 }
5268 break;
5269
5270 case 'C': case 'D': case 'c': case 'd':
5271 /* Write out comparison name. */
5272 {
5273 enum rtx_code c = GET_CODE (x);
5274
5275 if (!COMPARISON_P (x))
5276 output_operand_lossage ("invalid %%C value");
5277
5278 else if (code == 'D')
5279 c = reverse_condition (c);
5280 else if (code == 'c')
5281 c = swap_condition (c);
5282 else if (code == 'd')
5283 c = swap_condition (reverse_condition (c));
5284
5285 if (c == LEU)
5286 fprintf (file, "ule");
5287 else if (c == LTU)
5288 fprintf (file, "ult");
5289 else if (c == UNORDERED)
5290 fprintf (file, "un");
5291 else
5292 fprintf (file, "%s", GET_RTX_NAME (c));
5293 }
5294 break;
5295
5296 case 'E':
5297 /* Write the divide or modulus operator. */
5298 switch (GET_CODE (x))
5299 {
5300 case DIV:
5301 fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
5302 break;
5303 case UDIV:
5304 fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
5305 break;
5306 case MOD:
5307 fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
5308 break;
5309 case UMOD:
5310 fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
5311 break;
5312 default:
5313 output_operand_lossage ("invalid %%E value");
5314 break;
5315 }
5316 break;
5317
5318 case 'A':
5319 /* Write "_u" for unaligned access. */
5320 if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
5321 fprintf (file, "_u");
5322 break;
5323
5324 case 0:
5325 if (REG_P (x))
5326 fprintf (file, "%s", reg_names[REGNO (x)]);
5327 else if (MEM_P (x))
5328 output_address (XEXP (x, 0));
5329 else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
5330 {
5331 switch (XINT (XEXP (x, 0), 1))
5332 {
5333 case UNSPEC_DTPREL:
5334 case UNSPEC_TPREL:
5335 output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
5336 break;
5337 default:
5338 output_operand_lossage ("unknown relocation unspec");
5339 break;
5340 }
5341 }
5342 else
5343 output_addr_const (file, x);
5344 break;
5345
5346 default:
5347 output_operand_lossage ("invalid %%xn code");
5348 }
5349 }
5350
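/* Output the memory address ADDR to FILE.  Handles plain register and
   register-plus-constant addresses, the AND form used for unaligned
   accesses, and LO_SUM addresses, for which the matching low-part
   relocation annotation (gprel/gprellow, dtprel, tprel) is also printed.  */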
5351 void
5352 print_operand_address (FILE *file, rtx addr)
5353 {
5354 int basereg = 31;
5355 HOST_WIDE_INT offset = 0;
5356
5357 if (GET_CODE (addr) == AND)
5358 addr = XEXP (addr, 0);
5359
5360 if (GET_CODE (addr) == PLUS
5361 && CONST_INT_P (XEXP (addr, 1)))
5362 {
5363 offset = INTVAL (XEXP (addr, 1));
5364 addr = XEXP (addr, 0);
5365 }
5366
5367 if (GET_CODE (addr) == LO_SUM)
5368 {
5369 const char *reloc16, *reloclo;
5370 rtx op1 = XEXP (addr, 1);
5371
5372 if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
5373 {
5374 op1 = XEXP (op1, 0);
5375 switch (XINT (op1, 1))
5376 {
5377 case UNSPEC_DTPREL:
5378 reloc16 = NULL;
5379 reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
5380 break;
5381 case UNSPEC_TPREL:
5382 reloc16 = NULL;
5383 reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
5384 break;
5385 default:
5386 output_operand_lossage ("unknown relocation unspec");
5387 return;
5388 }
5389
5390 output_addr_const (file, XVECEXP (op1, 0, 0));
5391 }
5392 else
5393 {
5394 reloc16 = "gprel";
5395 reloclo = "gprellow";
5396 output_addr_const (file, op1);
5397 }
5398
5399 if (offset)
5400 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
5401
5402 addr = XEXP (addr, 0);
5403 switch (GET_CODE (addr))
5404 {
5405 case REG:
5406 basereg = REGNO (addr);
5407 break;
5408
5409 case SUBREG:
5410 basereg = subreg_regno (addr);
5411 break;
5412
5413 default:
5414 gcc_unreachable ();
5415 }
5416
5417 fprintf (file, "($%d)\t\t!%s", basereg,
5418 (basereg == 29 ? reloc16 : reloclo));
5419 return;
5420 }
5421
5422 switch (GET_CODE (addr))
5423 {
5424 case REG:
5425 basereg = REGNO (addr);
5426 break;
5427
5428 case SUBREG:
5429 basereg = subreg_regno (addr);
5430 break;
5431
5432 case CONST_INT:
5433 offset = INTVAL (addr);
5434 break;
5435
5436 #if TARGET_ABI_OPEN_VMS
5437 case SYMBOL_REF:
5438 fprintf (file, "%s", XSTR (addr, 0));
5439 return;
5440
5441 case CONST:
5442 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
5443 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
5444 fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
5445 XSTR (XEXP (XEXP (addr, 0), 0), 0),
5446 INTVAL (XEXP (XEXP (addr, 0), 1)));
5447 return;
5448
5449 #endif
5450 default:
5451 gcc_unreachable ();
5452 }
5453
5454 fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
5455 }
5456 \f
5457 /* Emit RTL insns to initialize the variable parts of a trampoline at
5458 TRAMP. FNADDR is an RTX for the address of the function's pure
5459 code. CXT is an RTX for the static chain value for the function.
5460
5461 The three offset parameters are for the individual template's
5462 layout. A JMPOFS < 0 indicates that the trampoline does not
5463 contain instructions at all.
5464
5465 We assume here that a function will be called many more times than
5466 its address is taken (e.g., it might be passed to qsort), so we
5467 take the trouble to initialize the "hint" field in the JMP insn.
5468 Note that the hint field is PC (new) + 4 * bits 13:0. */
5469
5470 void
5471 alpha_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt,
5472 int fnofs, int cxtofs, int jmpofs)
5473 {
5474 rtx addr;
5475 /* VMS really uses DImode pointers in memory at this point. */
5476 enum machine_mode mode = TARGET_ABI_OPEN_VMS ? Pmode : ptr_mode;
5477
5478 #ifdef POINTERS_EXTEND_UNSIGNED
5479 fnaddr = convert_memory_address (mode, fnaddr);
5480 cxt = convert_memory_address (mode, cxt);
5481 #endif
5482
5483 /* Store function address and CXT. */
5484 addr = memory_address (mode, plus_constant (tramp, fnofs));
5485 emit_move_insn (gen_rtx_MEM (mode, addr), fnaddr);
5486 addr = memory_address (mode, plus_constant (tramp, cxtofs));
5487 emit_move_insn (gen_rtx_MEM (mode, addr), cxt);
5488
5489 #ifdef ENABLE_EXECUTE_STACK
5490 emit_library_call (init_one_libfunc ("__enable_execute_stack"),
5491 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
5492 #endif
5493
5494 if (jmpofs >= 0)
5495 emit_insn (gen_imb ());
5496 }
5497 \f
5498 /* Determine where to put an argument to a function.
5499 Value is zero to push the argument on the stack,
5500 or a hard register in which to store the argument.
5501
5502 MODE is the argument's machine mode.
5503 TYPE is the data type of the argument (as a tree).
5504 This is null for libcalls where that information may
5505 not be available.
5506 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5507 the preceding args and about the function being called.
5508 NAMED is nonzero if this argument is a named parameter
5509 (otherwise it is an extra parameter matching an ellipsis).
5510
5511 On Alpha the first 6 words of args are normally in registers
5512 and the rest are pushed. */
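/* For example, for a hypothetical call f (double a, int b) under the OSF
   ABI, A is passed in $f16 (the first FP argument register) and B in $17,
   since each argument occupies one register slot regardless of its class.  */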
5513
5514 rtx
5515 function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode, tree type,
5516 int named ATTRIBUTE_UNUSED)
5517 {
5518 int basereg;
5519 int num_args;
5520
5521 /* Don't get confused and pass small structures in FP registers. */
5522 if (type && AGGREGATE_TYPE_P (type))
5523 basereg = 16;
5524 else
5525 {
5526 #ifdef ENABLE_CHECKING
5527 /* With alpha_split_complex_arg, we shouldn't see any raw complex
5528 values here. */
5529 gcc_assert (!COMPLEX_MODE_P (mode));
5530 #endif
5531
5532 /* Set up defaults for FP operands passed in FP registers, and
5533 integral operands passed in integer registers. */
5534 if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
5535 basereg = 32 + 16;
5536 else
5537 basereg = 16;
5538 }
5539
5540 /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
5541 the three platforms, so we can't avoid conditional compilation. */
5542 #if TARGET_ABI_OPEN_VMS
5543 {
5544 if (mode == VOIDmode)
5545 return alpha_arg_info_reg_val (cum);
5546
5547 num_args = cum.num_args;
5548 if (num_args >= 6
5549 || targetm.calls.must_pass_in_stack (mode, type))
5550 return NULL_RTX;
5551 }
5552 #elif TARGET_ABI_UNICOSMK
5553 {
5554 int size;
5555
5556 /* If this is the last argument, generate the call info word (CIW). */
5557 /* ??? We don't include the caller's line number in the CIW because
5558 I don't know how to determine it if debug info is turned off. */
5559 if (mode == VOIDmode)
5560 {
5561 int i;
5562 HOST_WIDE_INT lo;
5563 HOST_WIDE_INT hi;
5564 rtx ciw;
5565
5566 lo = 0;
5567
5568 for (i = 0; i < cum.num_reg_words && i < 5; i++)
5569 if (cum.reg_args_type[i])
5570 lo |= (1 << (7 - i));
5571
5572 if (cum.num_reg_words == 6 && cum.reg_args_type[5])
5573 lo |= 7;
5574 else
5575 lo |= cum.num_reg_words;
5576
5577 #if HOST_BITS_PER_WIDE_INT == 32
5578 hi = (cum.num_args << 20) | cum.num_arg_words;
5579 #else
5580 lo = lo | ((HOST_WIDE_INT) cum.num_args << 52)
5581 | ((HOST_WIDE_INT) cum.num_arg_words << 32);
5582 hi = 0;
5583 #endif
5584 ciw = immed_double_const (lo, hi, DImode);
5585
5586 return gen_rtx_UNSPEC (DImode, gen_rtvec (1, ciw),
5587 UNSPEC_UMK_LOAD_CIW);
5588 }
5589
5590 size = ALPHA_ARG_SIZE (mode, type, named);
5591 num_args = cum.num_reg_words;
5592 if (cum.force_stack
5593 || cum.num_reg_words + size > 6
5594 || targetm.calls.must_pass_in_stack (mode, type))
5595 return NULL_RTX;
5596 else if (type && TYPE_MODE (type) == BLKmode)
5597 {
5598 rtx reg1, reg2;
5599
5600 reg1 = gen_rtx_REG (DImode, num_args + 16);
5601 reg1 = gen_rtx_EXPR_LIST (DImode, reg1, const0_rtx);
5602
5603 /* The argument fits in two registers. Note that we still need to
5604 reserve a register for empty structures. */
5605 if (size == 0)
5606 return NULL_RTX;
5607 else if (size == 1)
5608 return gen_rtx_PARALLEL (mode, gen_rtvec (1, reg1));
5609 else
5610 {
5611 reg2 = gen_rtx_REG (DImode, num_args + 17);
5612 reg2 = gen_rtx_EXPR_LIST (DImode, reg2, GEN_INT (8));
5613 return gen_rtx_PARALLEL (mode, gen_rtvec (2, reg1, reg2));
5614 }
5615 }
5616 }
5617 #elif TARGET_ABI_OSF
5618 {
5619 if (cum >= 6)
5620 return NULL_RTX;
5621 num_args = cum;
5622
5623 /* VOID is passed as a special flag for "last argument". */
5624 if (type == void_type_node)
5625 basereg = 16;
5626 else if (targetm.calls.must_pass_in_stack (mode, type))
5627 return NULL_RTX;
5628 }
5629 #else
5630 #error Unhandled ABI
5631 #endif
5632
5633 return gen_rtx_REG (mode, num_args + basereg);
5634 }
5635
5636 static int
5637 alpha_arg_partial_bytes (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5638 enum machine_mode mode ATTRIBUTE_UNUSED,
5639 tree type ATTRIBUTE_UNUSED,
5640 bool named ATTRIBUTE_UNUSED)
5641 {
5642 int words = 0;
5643
5644 #if TARGET_ABI_OPEN_VMS
5645 if (cum->num_args < 6
5646 && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
5647 words = 6 - cum->num_args;
5648 #elif TARGET_ABI_UNICOSMK
5649 /* Never any split arguments. */
5650 #elif TARGET_ABI_OSF
5651 if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
5652 words = 6 - *cum;
5653 #else
5654 #error Unhandled ABI
5655 #endif
5656
5657 return words * UNITS_PER_WORD;
5658 }
5659
5660
5661 /* Return true if TYPE must be returned in memory, instead of in registers. */
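/* For example, every aggregate and every float vector is returned in memory,
   while a complex double (16 bytes) is still returned in registers because
   only its 8-byte element size is checked against UNITS_PER_WORD.  */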
5662
5663 static bool
5664 alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5665 {
5666 enum machine_mode mode = VOIDmode;
5667 int size;
5668
5669 if (type)
5670 {
5671 mode = TYPE_MODE (type);
5672
5673 /* All aggregates are returned in memory. */
5674 if (AGGREGATE_TYPE_P (type))
5675 return true;
5676 }
5677
5678 size = GET_MODE_SIZE (mode);
5679 switch (GET_MODE_CLASS (mode))
5680 {
5681 case MODE_VECTOR_FLOAT:
5682 /* Pass all float vectors in memory, like an aggregate. */
5683 return true;
5684
5685 case MODE_COMPLEX_FLOAT:
5686 /* We judge complex floats on the size of their element,
5687 not the size of the whole type. */
5688 size = GET_MODE_UNIT_SIZE (mode);
5689 break;
5690
5691 case MODE_INT:
5692 case MODE_FLOAT:
5693 case MODE_COMPLEX_INT:
5694 case MODE_VECTOR_INT:
5695 break;
5696
5697 default:
5698 /* ??? We get called on all sorts of random stuff from
5699 aggregate_value_p. We must return something, but it's not
5700 clear what's safe to return. Pretend it's a struct I
5701 guess. */
5702 return true;
5703 }
5704
5705 /* Otherwise types must fit in one register. */
5706 return size > UNITS_PER_WORD;
5707 }
5708
5709 /* Return true if TYPE should be passed by invisible reference. */
5710
5711 static bool
5712 alpha_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5713 enum machine_mode mode,
5714 const_tree type ATTRIBUTE_UNUSED,
5715 bool named ATTRIBUTE_UNUSED)
5716 {
5717 return mode == TFmode || mode == TCmode;
5718 }
5719
5720 /* Define how to find the value returned by a function. VALTYPE is the
5721 data type of the value (as a tree). If the precise function being
5722 called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
5723 MODE is set instead of VALTYPE for libcalls.
5724
5725 On Alpha the value is found in $0 for integer functions and
5726 $f0 for floating-point functions. */
5727
5728 rtx
5729 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
5730 enum machine_mode mode)
5731 {
5732 unsigned int regnum, dummy;
5733 enum mode_class mclass;
5734
5735 gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
5736
5737 if (valtype)
5738 mode = TYPE_MODE (valtype);
5739
5740 mclass = GET_MODE_CLASS (mode);
5741 switch (mclass)
5742 {
5743 case MODE_INT:
5744 PROMOTE_MODE (mode, dummy, valtype);
5745 /* FALLTHRU */
5746
5747 case MODE_COMPLEX_INT:
5748 case MODE_VECTOR_INT:
5749 regnum = 0;
5750 break;
5751
5752 case MODE_FLOAT:
5753 regnum = 32;
5754 break;
5755
5756 case MODE_COMPLEX_FLOAT:
5757 {
5758 enum machine_mode cmode = GET_MODE_INNER (mode);
5759
5760 return gen_rtx_PARALLEL
5761 (VOIDmode,
5762 gen_rtvec (2,
5763 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
5764 const0_rtx),
5765 gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
5766 GEN_INT (GET_MODE_SIZE (cmode)))));
5767 }
5768
5769 default:
5770 gcc_unreachable ();
5771 }
5772
5773 return gen_rtx_REG (mode, regnum);
5774 }
5775
5776 /* TCmode complex values are passed by invisible reference. We
5777 should not split these values. */
5778
5779 static bool
5780 alpha_split_complex_arg (const_tree type)
5781 {
5782 return TYPE_MODE (type) != TCmode;
5783 }
5784
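/* The OSF va_list is a record with two used fields; roughly (a sketch,
   using the field names created below):

	struct __va_list_tag {
	  void *__base;		   base of the argument save area
	  int __offset;		   byte offset of the next argument
	  int pad;		   unnamed dummy field for alignment
	};  */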
5785 static tree
5786 alpha_build_builtin_va_list (void)
5787 {
5788 tree base, ofs, space, record, type_decl;
5789
5790 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
5791 return ptr_type_node;
5792
5793 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
5794 type_decl = build_decl (BUILTINS_LOCATION,
5795 TYPE_DECL, get_identifier ("__va_list_tag"), record);
5796 TREE_CHAIN (record) = type_decl;
5797 TYPE_NAME (record) = type_decl;
5798
5799 /* C++? SET_IS_AGGR_TYPE (record, 1); */
5800
5801 /* Dummy field to prevent alignment warnings. */
5802 space = build_decl (BUILTINS_LOCATION,
5803 FIELD_DECL, NULL_TREE, integer_type_node);
5804 DECL_FIELD_CONTEXT (space) = record;
5805 DECL_ARTIFICIAL (space) = 1;
5806 DECL_IGNORED_P (space) = 1;
5807
5808 ofs = build_decl (BUILTINS_LOCATION,
5809 FIELD_DECL, get_identifier ("__offset"),
5810 integer_type_node);
5811 DECL_FIELD_CONTEXT (ofs) = record;
5812 TREE_CHAIN (ofs) = space;
5813
5814 base = build_decl (BUILTINS_LOCATION,
5815 FIELD_DECL, get_identifier ("__base"),
5816 ptr_type_node);
5817 DECL_FIELD_CONTEXT (base) = record;
5818 TREE_CHAIN (base) = ofs;
5819
5820 TYPE_FIELDS (record) = base;
5821 layout_type (record);
5822
5823 va_list_gpr_counter_field = ofs;
5824 return record;
5825 }
5826
5827 #if TARGET_ABI_OSF
5828 /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
5829 and constant additions. */
5830
5831 static gimple
5832 va_list_skip_additions (tree lhs)
5833 {
5834 gimple stmt;
5835
5836 for (;;)
5837 {
5838 enum tree_code code;
5839
5840 stmt = SSA_NAME_DEF_STMT (lhs);
5841
5842 if (gimple_code (stmt) == GIMPLE_PHI)
5843 return stmt;
5844
5845 if (!is_gimple_assign (stmt)
5846 || gimple_assign_lhs (stmt) != lhs)
5847 return NULL;
5848
5849 if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
5850 return stmt;
5851 code = gimple_assign_rhs_code (stmt);
5852 if (!CONVERT_EXPR_CODE_P (code)
5853 && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
5854 || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
5855 || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
5856 return stmt;
5857
5858 lhs = gimple_assign_rhs1 (stmt);
5859 }
5860 }
5861
5862 /* Check if LHS = RHS statement is
5863 LHS = *(ap.__base + ap.__offset + cst)
5864 or
5865 LHS = *(ap.__base
5866 + ((ap.__offset + cst <= 47)
5867 ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
5868 If the former, indicate that GPR registers are needed,
5869 if the latter, indicate that FPR registers are needed.
5870
5871 Also look for LHS = (*ptr).field, where ptr is one of the forms
5872 listed above.
5873
5874 On alpha, cfun->va_list_gpr_size is used as size of the needed
5875 regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
5876 registers are needed and bit 1 set if FPR registers are needed.
5877 Return true if va_list references should not be scanned for the
5878 current statement. */
5879
5880 static bool
5881 alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
5882 {
5883 tree base, offset, rhs;
5884 int offset_arg = 1;
5885 gimple base_stmt;
5886
5887 if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
5888 != GIMPLE_SINGLE_RHS)
5889 return false;
5890
5891 rhs = gimple_assign_rhs1 (stmt);
5892 while (handled_component_p (rhs))
5893 rhs = TREE_OPERAND (rhs, 0);
5894 if (TREE_CODE (rhs) != INDIRECT_REF
5895 || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
5896 return false;
5897
5898 stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
5899 if (stmt == NULL
5900 || !is_gimple_assign (stmt)
5901 || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
5902 return false;
5903
5904 base = gimple_assign_rhs1 (stmt);
5905 if (TREE_CODE (base) == SSA_NAME)
5906 {
5907 base_stmt = va_list_skip_additions (base);
5908 if (base_stmt
5909 && is_gimple_assign (base_stmt)
5910 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5911 base = gimple_assign_rhs1 (base_stmt);
5912 }
5913
5914 if (TREE_CODE (base) != COMPONENT_REF
5915 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5916 {
5917 base = gimple_assign_rhs2 (stmt);
5918 if (TREE_CODE (base) == SSA_NAME)
5919 {
5920 base_stmt = va_list_skip_additions (base);
5921 if (base_stmt
5922 && is_gimple_assign (base_stmt)
5923 && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
5924 base = gimple_assign_rhs1 (base_stmt);
5925 }
5926
5927 if (TREE_CODE (base) != COMPONENT_REF
5928 || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
5929 return false;
5930
5931 offset_arg = 0;
5932 }
5933
5934 base = get_base_address (base);
5935 if (TREE_CODE (base) != VAR_DECL
5936 || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
5937 return false;
5938
5939 offset = gimple_op (stmt, 1 + offset_arg);
5940 if (TREE_CODE (offset) == SSA_NAME)
5941 {
5942 gimple offset_stmt = va_list_skip_additions (offset);
5943
5944 if (offset_stmt
5945 && gimple_code (offset_stmt) == GIMPLE_PHI)
5946 {
5947 HOST_WIDE_INT sub;
5948 gimple arg1_stmt, arg2_stmt;
5949 tree arg1, arg2;
5950 enum tree_code code1, code2;
5951
5952 if (gimple_phi_num_args (offset_stmt) != 2)
5953 goto escapes;
5954
5955 arg1_stmt
5956 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
5957 arg2_stmt
5958 = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
5959 if (arg1_stmt == NULL
5960 || !is_gimple_assign (arg1_stmt)
5961 || arg2_stmt == NULL
5962 || !is_gimple_assign (arg2_stmt))
5963 goto escapes;
5964
5965 code1 = gimple_assign_rhs_code (arg1_stmt);
5966 code2 = gimple_assign_rhs_code (arg2_stmt);
5967 if (code1 == COMPONENT_REF
5968 && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
5969 /* Do nothing. */;
5970 else if (code2 == COMPONENT_REF
5971 && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
5972 {
5973 gimple tem = arg1_stmt;
5974 code2 = code1;
5975 arg1_stmt = arg2_stmt;
5976 arg2_stmt = tem;
5977 }
5978 else
5979 goto escapes;
5980
5981 if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
5982 goto escapes;
5983
5984 sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
5985 if (code2 == MINUS_EXPR)
5986 sub = -sub;
5987 if (sub < -48 || sub > -32)
5988 goto escapes;
5989
5990 arg1 = gimple_assign_rhs1 (arg1_stmt);
5991 arg2 = gimple_assign_rhs1 (arg2_stmt);
5992 if (TREE_CODE (arg2) == SSA_NAME)
5993 {
5994 arg2_stmt = va_list_skip_additions (arg2);
5995 if (arg2_stmt == NULL
5996 || !is_gimple_assign (arg2_stmt)
5997 || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
5998 goto escapes;
5999 arg2 = gimple_assign_rhs1 (arg2_stmt);
6000 }
6001 if (arg1 != arg2)
6002 goto escapes;
6003
6004 if (TREE_CODE (arg1) != COMPONENT_REF
6005 || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
6006 || get_base_address (arg1) != base)
6007 goto escapes;
6008
6009 /* Need floating point regs. */
6010 cfun->va_list_fpr_size |= 2;
6011 return false;
6012 }
6013 if (offset_stmt
6014 && is_gimple_assign (offset_stmt)
6015 && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
6016 offset = gimple_assign_rhs1 (offset_stmt);
6017 }
6018 if (TREE_CODE (offset) != COMPONENT_REF
6019 || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
6020 || get_base_address (offset) != base)
6021 goto escapes;
6022 else
6023 /* Need general regs. */
6024 cfun->va_list_fpr_size |= 1;
6025 return false;
6026
6027 escapes:
6028 si->va_list_escapes = true;
6029 return false;
6030 }
6031 #endif
6032
6033 /* Perform any actions needed for a function that is receiving a
6034 variable number of arguments. */
6035
6036 static void
6037 alpha_setup_incoming_varargs (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
6038 tree type, int *pretend_size, int no_rtl)
6039 {
6040 CUMULATIVE_ARGS cum = *pcum;
6041
6042 /* Skip the current argument. */
6043 FUNCTION_ARG_ADVANCE (cum, mode, type, 1);
6044
6045 #if TARGET_ABI_UNICOSMK
6046 /* On Unicos/Mk, the standard subroutine __T3E_MISMATCH stores all register
6047 arguments on the stack. Unfortunately, it doesn't always store the first
6048 one (i.e. the one that arrives in $16 or $f16). This is not a problem
6049 with stdargs as we always have at least one named argument there. */
6050 if (cum.num_reg_words < 6)
6051 {
6052 if (!no_rtl)
6053 {
6054 emit_insn (gen_umk_mismatch_args (GEN_INT (cum.num_reg_words)));
6055 emit_insn (gen_arg_home_umk ());
6056 }
6057 *pretend_size = 0;
6058 }
6059 #elif TARGET_ABI_OPEN_VMS
6060 /* For VMS, we allocate space for all 6 arg registers plus a count.
6061
6062 However, if NO registers need to be saved, don't allocate any space.
6063 This is not only because we won't need the space, but because AP
6064 includes the current_pretend_args_size and we don't want to mess up
6065 any ap-relative addresses already made. */
6066 if (cum.num_args < 6)
6067 {
6068 if (!no_rtl)
6069 {
6070 emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
6071 emit_insn (gen_arg_home ());
6072 }
6073 *pretend_size = 7 * UNITS_PER_WORD;
6074 }
6075 #else
6076 /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
6077 only push those that are remaining. However, if NO registers need to
6078 be saved, don't allocate any space. This is not only because we won't
6079 need the space, but because AP includes the current_pretend_args_size
6080 and we don't want to mess up any ap-relative addresses already made.
6081
6082 If we are not to use the floating-point registers, save the integer
6083 registers where we would put the floating-point registers. This is
6084 not the most efficient way to implement varargs with just one register
6085 class, but it isn't worth doing anything more efficient in this rare
6086 case. */
6087 if (cum >= 6)
6088 return;
6089
6090 if (!no_rtl)
6091 {
6092 int count;
6093 alias_set_type set = get_varargs_alias_set ();
6094 rtx tmp;
6095
6096 count = cfun->va_list_gpr_size / UNITS_PER_WORD;
6097 if (count > 6 - cum)
6098 count = 6 - cum;
6099
6100 /* Detect whether integer registers or floating-point registers
6101 are needed by the detected va_arg statements. See above for
6102 how these values are computed. Note that the "escape" value
6103 is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
6104 these bits set. */
6105 gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
6106
6107 if (cfun->va_list_fpr_size & 1)
6108 {
6109 tmp = gen_rtx_MEM (BLKmode,
6110 plus_constant (virtual_incoming_args_rtx,
6111 (cum + 6) * UNITS_PER_WORD));
6112 MEM_NOTRAP_P (tmp) = 1;
6113 set_mem_alias_set (tmp, set);
6114 move_block_from_reg (16 + cum, tmp, count);
6115 }
6116
6117 if (cfun->va_list_fpr_size & 2)
6118 {
6119 tmp = gen_rtx_MEM (BLKmode,
6120 plus_constant (virtual_incoming_args_rtx,
6121 cum * UNITS_PER_WORD));
6122 MEM_NOTRAP_P (tmp) = 1;
6123 set_mem_alias_set (tmp, set);
6124 move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
6125 }
6126 }
6127 *pretend_size = 12 * UNITS_PER_WORD;
6128 #endif
6129 }
6130
6131 static void
6132 alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
6133 {
6134 HOST_WIDE_INT offset;
6135 tree t, offset_field, base_field;
6136
6137 if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
6138 return;
6139
6140 if (TARGET_ABI_UNICOSMK)
6141 std_expand_builtin_va_start (valist, nextarg);
6142
6143 /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
6144 up by 48, storing fp arg registers in the first 48 bytes, and the
6145 integer arg registers in the next 48 bytes. This is only done,
6146 however, if any integer registers need to be stored.
6147
6148 If no integer registers need be stored, then we must subtract 48
6149 in order to account for the integer arg registers which are counted
6150 in argsize above, but which are not actually stored on the stack.
6151 Must further be careful here about structures straddling the last
6152 integer argument register; that futzes with pretend_args_size,
6153 which changes the meaning of AP. */
6154
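/* Worked example (illustrative): with two named arguments
   (NUM_ARGS == 2 < 6), below we point __base at AP + 48 (the start
   of the integer-register block) and set __offset to 2*8 = 16.  A
   later va_arg of a long then reads *(__base + 16), while a va_arg
   of a double with __offset < 48 reads *(__base + 16 - 48), i.e. the
   matching slot in the FP-register block.  */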
6155 if (NUM_ARGS < 6)
6156 offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
6157 else
6158 offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
6159
6160 if (TARGET_ABI_OPEN_VMS)
6161 {
6162 nextarg = plus_constant (nextarg, offset);
6163 nextarg = plus_constant (nextarg, NUM_ARGS * UNITS_PER_WORD);
6164 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist,
6165 make_tree (ptr_type_node, nextarg));
6166 TREE_SIDE_EFFECTS (t) = 1;
6167
6168 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6169 }
6170 else
6171 {
6172 base_field = TYPE_FIELDS (TREE_TYPE (valist));
6173 offset_field = TREE_CHAIN (base_field);
6174
6175 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6176 valist, base_field, NULL_TREE);
6177 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6178 valist, offset_field, NULL_TREE);
6179
6180 t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
6181 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
6182 size_int (offset));
6183 t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
6184 TREE_SIDE_EFFECTS (t) = 1;
6185 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6186
6187 t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
6188 t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
6189 TREE_SIDE_EFFECTS (t) = 1;
6190 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6191 }
6192 }
6193
6194 static tree
6195 alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
6196 gimple_seq *pre_p)
6197 {
6198 tree type_size, ptr_type, addend, t, addr;
6199 gimple_seq internal_post;
6200
6201 /* If the type could not be passed in registers, skip the block
6202 reserved for the registers. */
6203 if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
6204 {
6205 t = build_int_cst (TREE_TYPE (offset), 6*8);
6206 gimplify_assign (offset,
6207 build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
6208 pre_p);
6209 }
6210
6211 addend = offset;
6212 ptr_type = build_pointer_type (type);
6213
6214 if (TREE_CODE (type) == COMPLEX_TYPE)
6215 {
6216 tree real_part, imag_part, real_temp;
6217
6218 real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6219 offset, pre_p);
6220
6221 /* Copy the value into a new temporary, lest the formal temporary
6222 be reused out from under us. */
6223 real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
6224
6225 imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
6226 offset, pre_p);
6227
6228 return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
6229 }
6230 else if (TREE_CODE (type) == REAL_TYPE)
6231 {
6232 tree fpaddend, cond, fourtyeight;
6233
6234 fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
6235 fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
6236 addend, fourtyeight);
6237 cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
6238 addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
6239 fpaddend, addend);
6240 }
6241
6242 /* Build the final address and force that value into a temporary. */
6243 addr = build2 (POINTER_PLUS_EXPR, ptr_type, fold_convert (ptr_type, base),
6244 fold_convert (sizetype, addend));
6245 internal_post = NULL;
6246 gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
6247 gimple_seq_add_seq (pre_p, internal_post);
6248
6249 /* Update the offset field. */
6250 type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
6251 if (type_size == NULL || TREE_OVERFLOW (type_size))
6252 t = size_zero_node;
6253 else
6254 {
6255 t = size_binop (PLUS_EXPR, type_size, size_int (7));
6256 t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
6257 t = size_binop (MULT_EXPR, t, size_int (8));
6258 }
6259 t = fold_convert (TREE_TYPE (offset), t);
6260 gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
6261 pre_p);
6262
6263 return build_va_arg_indirect_ref (addr);
6264 }
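/* For illustration: the offset update above rounds the argument size
   up to whole 8-byte slots, so a 12-byte struct advances __offset by
   (12 + 7) / 8 * 8 = 16, while a 4-byte int still consumes a full
   8-byte slot.  */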
6265
6266 static tree
6267 alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6268 gimple_seq *post_p)
6269 {
6270 tree offset_field, base_field, offset, base, t, r;
6271 bool indirect;
6272
6273 if (TARGET_ABI_OPEN_VMS || TARGET_ABI_UNICOSMK)
6274 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6275
6276 base_field = TYPE_FIELDS (va_list_type_node);
6277 offset_field = TREE_CHAIN (base_field);
6278 base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
6279 valist, base_field, NULL_TREE);
6280 offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
6281 valist, offset_field, NULL_TREE);
6282
6283 /* Pull the fields of the structure out into temporaries. Since we never
6284 modify the base field, we can use a formal temporary. Sign-extend the
6285 offset field so that it's the proper width for pointer arithmetic. */
6286 base = get_formal_tmp_var (base_field, pre_p);
6287
6288 t = fold_convert (lang_hooks.types.type_for_size (64, 0), offset_field);
6289 offset = get_initialized_tmp_var (t, pre_p, NULL);
6290
6291 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6292 if (indirect)
6293 type = build_pointer_type (type);
6294
6295 /* Find the value. Note that this will be a stable indirection, or
6296 a composite of stable indirections in the case of complex. */
6297 r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
6298
6299 /* Stuff the offset temporary back into its field. */
6300 gimplify_assign (unshare_expr (offset_field),
6301 fold_convert (TREE_TYPE (offset_field), offset), pre_p);
6302
6303 if (indirect)
6304 r = build_va_arg_indirect_ref (r);
6305
6306 return r;
6307 }
6308 \f
6309 /* Builtins. */
6310
6311 enum alpha_builtin
6312 {
6313 ALPHA_BUILTIN_CMPBGE,
6314 ALPHA_BUILTIN_EXTBL,
6315 ALPHA_BUILTIN_EXTWL,
6316 ALPHA_BUILTIN_EXTLL,
6317 ALPHA_BUILTIN_EXTQL,
6318 ALPHA_BUILTIN_EXTWH,
6319 ALPHA_BUILTIN_EXTLH,
6320 ALPHA_BUILTIN_EXTQH,
6321 ALPHA_BUILTIN_INSBL,
6322 ALPHA_BUILTIN_INSWL,
6323 ALPHA_BUILTIN_INSLL,
6324 ALPHA_BUILTIN_INSQL,
6325 ALPHA_BUILTIN_INSWH,
6326 ALPHA_BUILTIN_INSLH,
6327 ALPHA_BUILTIN_INSQH,
6328 ALPHA_BUILTIN_MSKBL,
6329 ALPHA_BUILTIN_MSKWL,
6330 ALPHA_BUILTIN_MSKLL,
6331 ALPHA_BUILTIN_MSKQL,
6332 ALPHA_BUILTIN_MSKWH,
6333 ALPHA_BUILTIN_MSKLH,
6334 ALPHA_BUILTIN_MSKQH,
6335 ALPHA_BUILTIN_UMULH,
6336 ALPHA_BUILTIN_ZAP,
6337 ALPHA_BUILTIN_ZAPNOT,
6338 ALPHA_BUILTIN_AMASK,
6339 ALPHA_BUILTIN_IMPLVER,
6340 ALPHA_BUILTIN_RPCC,
6341 ALPHA_BUILTIN_THREAD_POINTER,
6342 ALPHA_BUILTIN_SET_THREAD_POINTER,
6343
6344 /* TARGET_MAX */
6345 ALPHA_BUILTIN_MINUB8,
6346 ALPHA_BUILTIN_MINSB8,
6347 ALPHA_BUILTIN_MINUW4,
6348 ALPHA_BUILTIN_MINSW4,
6349 ALPHA_BUILTIN_MAXUB8,
6350 ALPHA_BUILTIN_MAXSB8,
6351 ALPHA_BUILTIN_MAXUW4,
6352 ALPHA_BUILTIN_MAXSW4,
6353 ALPHA_BUILTIN_PERR,
6354 ALPHA_BUILTIN_PKLB,
6355 ALPHA_BUILTIN_PKWB,
6356 ALPHA_BUILTIN_UNPKBL,
6357 ALPHA_BUILTIN_UNPKBW,
6358
6359 /* TARGET_CIX */
6360 ALPHA_BUILTIN_CTTZ,
6361 ALPHA_BUILTIN_CTLZ,
6362 ALPHA_BUILTIN_CTPOP,
6363
6364 ALPHA_BUILTIN_max
6365 };
6366
6367 static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
6368 CODE_FOR_builtin_cmpbge,
6369 CODE_FOR_builtin_extbl,
6370 CODE_FOR_builtin_extwl,
6371 CODE_FOR_builtin_extll,
6372 CODE_FOR_builtin_extql,
6373 CODE_FOR_builtin_extwh,
6374 CODE_FOR_builtin_extlh,
6375 CODE_FOR_builtin_extqh,
6376 CODE_FOR_builtin_insbl,
6377 CODE_FOR_builtin_inswl,
6378 CODE_FOR_builtin_insll,
6379 CODE_FOR_builtin_insql,
6380 CODE_FOR_builtin_inswh,
6381 CODE_FOR_builtin_inslh,
6382 CODE_FOR_builtin_insqh,
6383 CODE_FOR_builtin_mskbl,
6384 CODE_FOR_builtin_mskwl,
6385 CODE_FOR_builtin_mskll,
6386 CODE_FOR_builtin_mskql,
6387 CODE_FOR_builtin_mskwh,
6388 CODE_FOR_builtin_msklh,
6389 CODE_FOR_builtin_mskqh,
6390 CODE_FOR_umuldi3_highpart,
6391 CODE_FOR_builtin_zap,
6392 CODE_FOR_builtin_zapnot,
6393 CODE_FOR_builtin_amask,
6394 CODE_FOR_builtin_implver,
6395 CODE_FOR_builtin_rpcc,
6396 CODE_FOR_load_tp,
6397 CODE_FOR_set_tp,
6398
6399 /* TARGET_MAX */
6400 CODE_FOR_builtin_minub8,
6401 CODE_FOR_builtin_minsb8,
6402 CODE_FOR_builtin_minuw4,
6403 CODE_FOR_builtin_minsw4,
6404 CODE_FOR_builtin_maxub8,
6405 CODE_FOR_builtin_maxsb8,
6406 CODE_FOR_builtin_maxuw4,
6407 CODE_FOR_builtin_maxsw4,
6408 CODE_FOR_builtin_perr,
6409 CODE_FOR_builtin_pklb,
6410 CODE_FOR_builtin_pkwb,
6411 CODE_FOR_builtin_unpkbl,
6412 CODE_FOR_builtin_unpkbw,
6413
6414 /* TARGET_CIX */
6415 CODE_FOR_ctzdi2,
6416 CODE_FOR_clzdi2,
6417 CODE_FOR_popcountdi2
6418 };
6419
6420 struct alpha_builtin_def
6421 {
6422 const char *name;
6423 enum alpha_builtin code;
6424 unsigned int target_mask;
6425 bool is_const;
6426 };
6427
6428 static struct alpha_builtin_def const zero_arg_builtins[] = {
6429 { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
6430 { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
6431 };
6432
6433 static struct alpha_builtin_def const one_arg_builtins[] = {
6434 { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
6435 { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
6436 { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
6437 { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
6438 { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
6439 { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
6440 { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
6441 { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
6442 };
6443
6444 static struct alpha_builtin_def const two_arg_builtins[] = {
6445 { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
6446 { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
6447 { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
6448 { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
6449 { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
6450 { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
6451 { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
6452 { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
6453 { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
6454 { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
6455 { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
6456 { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
6457 { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
6458 { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
6459 { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
6460 { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
6461 { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
6462 { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
6463 { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
6464 { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
6465 { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
6466 { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
6467 { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
6468 { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
6469 { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
6470 { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
6471 { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
6472 { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
6473 { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
6474 { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
6475 { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
6476 { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
6477 { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
6478 { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
6479 };
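/* Illustrative user-level usage of the builtins tabulated above (an
   editorial sketch; the actual declarations are made by
   alpha_init_builtins below):

     keep byte lanes 0-3:        r = __builtin_alpha_zapnot (x, 0x0f);
     bits 64..127 of a * b:      r = __builtin_alpha_umulh (a, b);
     per-byte unsigned >= test:  r = __builtin_alpha_cmpbge (a, b);  */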
6480
6481 static GTY(()) tree alpha_v8qi_u;
6482 static GTY(()) tree alpha_v8qi_s;
6483 static GTY(()) tree alpha_v4hi_u;
6484 static GTY(()) tree alpha_v4hi_s;
6485
6486 /* Helper function of alpha_init_builtins. Add the COUNT built-in
6487 functions pointed to by P, with function type FTYPE. */
6488
6489 static void
6490 alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
6491 tree ftype)
6492 {
6493 tree decl;
6494 size_t i;
6495
6496 for (i = 0; i < count; ++i, ++p)
6497 if ((target_flags & p->target_mask) == p->target_mask)
6498 {
6499 decl = add_builtin_function (p->name, ftype, p->code, BUILT_IN_MD,
6500 NULL, NULL);
6501 if (p->is_const)
6502 TREE_READONLY (decl) = 1;
6503 TREE_NOTHROW (decl) = 1;
6504 }
6505 }
6506
6507
6508 static void
6509 alpha_init_builtins (void)
6510 {
6511 tree dimode_integer_type_node;
6512 tree ftype, decl;
6513
6514 dimode_integer_type_node = lang_hooks.types.type_for_mode (DImode, 0);
6515
6516 ftype = build_function_type (dimode_integer_type_node, void_list_node);
6517 alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins),
6518 ftype);
6519
6520 ftype = build_function_type_list (dimode_integer_type_node,
6521 dimode_integer_type_node, NULL_TREE);
6522 alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins),
6523 ftype);
6524
6525 ftype = build_function_type_list (dimode_integer_type_node,
6526 dimode_integer_type_node,
6527 dimode_integer_type_node, NULL_TREE);
6528 alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins),
6529 ftype);
6530
6531 ftype = build_function_type (ptr_type_node, void_list_node);
6532 decl = add_builtin_function ("__builtin_thread_pointer", ftype,
6533 ALPHA_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
6534 NULL, NULL);
6535 TREE_NOTHROW (decl) = 1;
6536
6537 ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
6538 decl = add_builtin_function ("__builtin_set_thread_pointer", ftype,
6539 ALPHA_BUILTIN_SET_THREAD_POINTER, BUILT_IN_MD,
6540 NULL, NULL);
6541 TREE_NOTHROW (decl) = 1;
6542
6543 alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
6544 alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
6545 alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
6546 alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
6547 }
6548
6549 /* Expand an expression EXP that calls a built-in function,
6550 with result going to TARGET if that's convenient
6551 (and in mode MODE if that's convenient).
6552 SUBTARGET may be used as the target for computing one of EXP's operands.
6553 IGNORE is nonzero if the value is to be ignored. */
6554
6555 static rtx
6556 alpha_expand_builtin (tree exp, rtx target,
6557 rtx subtarget ATTRIBUTE_UNUSED,
6558 enum machine_mode mode ATTRIBUTE_UNUSED,
6559 int ignore ATTRIBUTE_UNUSED)
6560 {
6561 #define MAX_ARGS 2
6562
6563 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
6564 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
6565 tree arg;
6566 call_expr_arg_iterator iter;
6567 enum insn_code icode;
6568 rtx op[MAX_ARGS], pat;
6569 int arity;
6570 bool nonvoid;
6571
6572 if (fcode >= ALPHA_BUILTIN_max)
6573 internal_error ("bad builtin fcode");
6574 icode = code_for_builtin[fcode];
6575 if (icode == 0)
6576 internal_error ("bad builtin fcode");
6577
6578 nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
6579
6580 arity = 0;
6581 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
6582 {
6583 const struct insn_operand_data *insn_op;
6584
6585 if (arg == error_mark_node)
6586 return NULL_RTX;
6587 if (arity >= MAX_ARGS)
6588 return NULL_RTX;
6589
6590 insn_op = &insn_data[icode].operand[arity + nonvoid];
6591
6592 op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
6593
6594 if (!(*insn_op->predicate) (op[arity], insn_op->mode))
6595 op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
6596 arity++;
6597 }
6598
6599 if (nonvoid)
6600 {
6601 enum machine_mode tmode = insn_data[icode].operand[0].mode;
6602 if (!target
6603 || GET_MODE (target) != tmode
6604 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
6605 target = gen_reg_rtx (tmode);
6606 }
6607
6608 switch (arity)
6609 {
6610 case 0:
6611 pat = GEN_FCN (icode) (target);
6612 break;
6613 case 1:
6614 if (nonvoid)
6615 pat = GEN_FCN (icode) (target, op[0]);
6616 else
6617 pat = GEN_FCN (icode) (op[0]);
6618 break;
6619 case 2:
6620 pat = GEN_FCN (icode) (target, op[0], op[1]);
6621 break;
6622 default:
6623 gcc_unreachable ();
6624 }
6625 if (!pat)
6626 return NULL_RTX;
6627 emit_insn (pat);
6628
6629 if (nonvoid)
6630 return target;
6631 else
6632 return const0_rtx;
6633 }
6634
6635
6636 /* Several bits below assume HWI >= 64 bits. This should be enforced
6637 by config.gcc. */
6638 #if HOST_BITS_PER_WIDE_INT < 64
6639 # error "HOST_WIDE_INT too small"
6640 #endif
6641
6642 /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
6643 with an 8-bit output vector. OPINT contains the integer operands; bit N
6644 of OP_CONST is set if OPINT[N] is valid. */
6645
6646 static tree
6647 alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
6648 {
6649 if (op_const == 3)
6650 {
6651 int i, val;
6652 for (i = 0, val = 0; i < 8; ++i)
6653 {
6654 unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
6655 unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
6656 if (c0 >= c1)
6657 val |= 1 << i;
6658 }
6659 return build_int_cst (long_integer_type_node, val);
6660 }
6661 else if (op_const == 2 && opint[1] == 0)
6662 return build_int_cst (long_integer_type_node, 0xff);
6663 return NULL;
6664 }
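/* Worked example for the fold above: with both operands constant,
   __builtin_alpha_cmpbge (0x1122334455667788, 0x2222222222222222)
   folds to 0x7f, since every byte lane except the most significant
   one is (unsigned) greater than or equal to 0x22.  */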
6665
6666 /* Fold the builtin for the ZAPNOT instruction. This is essentially a
6667 specialized form of an AND operation. Other byte manipulation instructions
6668 are defined in terms of this instruction, so this is also used as a
6669 subroutine for other builtins.
6670
6671 OP contains the tree operands; OPINT contains the extracted integer values.
6672 Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
6673 OPINT is to be considered. */
6674
6675 static tree
6676 alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
6677 long op_const)
6678 {
6679 if (op_const & 2)
6680 {
6681 unsigned HOST_WIDE_INT mask = 0;
6682 int i;
6683
6684 for (i = 0; i < 8; ++i)
6685 if ((opint[1] >> i) & 1)
6686 mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
6687
6688 if (op_const & 1)
6689 return build_int_cst (long_integer_type_node, opint[0] & mask);
6690
6691 if (op)
6692 return fold_build2 (BIT_AND_EXPR, long_integer_type_node, op[0],
6693 build_int_cst (long_integer_type_node, mask));
6694 }
6695 else if ((op_const & 1) && opint[0] == 0)
6696 return build_int_cst (long_integer_type_node, 0);
6697 return NULL;
6698 }
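/* Illustration: each set bit in the 8-bit mask keeps the matching
   byte lane, so with a constant mask the fold above turns
   __builtin_alpha_zapnot (x, 0x0f) into x & 0x00000000ffffffff;
   ZAP with the same mask (the mask is complemented by the caller)
   keeps only the upper four bytes instead.  */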
6699
6700 /* Fold the builtins for the EXT family of instructions. */
6701
6702 static tree
6703 alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
6704 long op_const, unsigned HOST_WIDE_INT bytemask,
6705 bool is_high)
6706 {
6707 long zap_const = 2;
6708 tree *zap_op = NULL;
6709
6710 if (op_const & 2)
6711 {
6712 unsigned HOST_WIDE_INT loc;
6713
6714 loc = opint[1] & 7;
6715 if (BYTES_BIG_ENDIAN)
6716 loc ^= 7;
6717 loc *= 8;
6718
6719 if (loc != 0)
6720 {
6721 if (op_const & 1)
6722 {
6723 unsigned HOST_WIDE_INT temp = opint[0];
6724 if (is_high)
6725 temp <<= loc;
6726 else
6727 temp >>= loc;
6728 opint[0] = temp;
6729 zap_const = 3;
6730 }
6731 }
6732 else
6733 zap_op = op;
6734 }
6735
6736 opint[1] = bytemask;
6737 return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
6738 }
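/* Examples for the EXT fold above (little-endian case): with only
   the byte offset constant and equal to zero,
   __builtin_alpha_extwl (x, 0) folds to x & 0xffff; with both
   operands constant, __builtin_alpha_extbl (0x1234, 1) folds to
   0x12, i.e. shift right by 8 bits and keep the low byte.  */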
6739
6740 /* Fold the builtins for the INS family of instructions. */
6741
6742 static tree
6743 alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
6744 long op_const, unsigned HOST_WIDE_INT bytemask,
6745 bool is_high)
6746 {
6747 if ((op_const & 1) && opint[0] == 0)
6748 return build_int_cst (long_integer_type_node, 0);
6749
6750 if (op_const & 2)
6751 {
6752 unsigned HOST_WIDE_INT temp, loc, byteloc;
6753 tree *zap_op = NULL;
6754
6755 loc = opint[1] & 7;
6756 if (BYTES_BIG_ENDIAN)
6757 loc ^= 7;
6758 bytemask <<= loc;
6759
6760 temp = opint[0];
6761 if (is_high)
6762 {
6763 byteloc = (64 - (loc * 8)) & 0x3f;
6764 if (byteloc == 0)
6765 zap_op = op;
6766 else
6767 temp >>= byteloc;
6768 bytemask >>= 8;
6769 }
6770 else
6771 {
6772 byteloc = loc * 8;
6773 if (byteloc == 0)
6774 zap_op = op;
6775 else
6776 temp <<= byteloc;
6777 }
6778
6779 opint[0] = temp;
6780 opint[1] = bytemask;
6781 return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
6782 }
6783
6784 return NULL;
6785 }
6786
6787 static tree
6788 alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
6789 long op_const, unsigned HOST_WIDE_INT bytemask,
6790 bool is_high)
6791 {
6792 if (op_const & 2)
6793 {
6794 unsigned HOST_WIDE_INT loc;
6795
6796 loc = opint[1] & 7;
6797 if (BYTES_BIG_ENDIAN)
6798 loc ^= 7;
6799 bytemask <<= loc;
6800
6801 if (is_high)
6802 bytemask >>= 8;
6803
6804 opint[1] = bytemask ^ 0xff;
6805 }
6806
6807 return alpha_fold_builtin_zapnot (op, opint, op_const);
6808 }
6809
6810 static tree
6811 alpha_fold_builtin_umulh (unsigned HOST_WIDE_INT opint[], long op_const)
6812 {
6813 switch (op_const)
6814 {
6815 case 3:
6816 {
6817 unsigned HOST_WIDE_INT l;
6818 HOST_WIDE_INT h;
6819
6820 mul_double (opint[0], 0, opint[1], 0, &l, &h);
6821
6822 #if HOST_BITS_PER_WIDE_INT > 64
6823 # error fixme
6824 #endif
6825
6826 return build_int_cst (long_integer_type_node, h);
6827 }
6828
6829 case 1:
6830 opint[1] = opint[0];
6831 /* FALLTHRU */
6832 case 2:
6833 /* Note that (X*1) >> 64 == 0. */
6834 if (opint[1] == 0 || opint[1] == 1)
6835 return build_int_cst (long_integer_type_node, 0);
6836 break;
6837 }
6838 return NULL;
6839 }
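/* Worked example: UMULH yields bits 64..127 of the unsigned 128-bit
   product, so __builtin_alpha_umulh (1UL << 40, 1UL << 40) folds to
   1UL << 16; a multiplier of 0 or 1 folds to 0 as noted above.  */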
6840
6841 static tree
6842 alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
6843 {
6844 tree op0 = fold_convert (vtype, op[0]);
6845 tree op1 = fold_convert (vtype, op[1]);
6846 tree val = fold_build2 (code, vtype, op0, op1);
6847 return fold_build1 (VIEW_CONVERT_EXPR, long_integer_type_node, val);
6848 }
6849
6850 static tree
6851 alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
6852 {
6853 unsigned HOST_WIDE_INT temp = 0;
6854 int i;
6855
6856 if (op_const != 3)
6857 return NULL;
6858
6859 for (i = 0; i < 8; ++i)
6860 {
6861 unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
6862 unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
6863 if (a >= b)
6864 temp += a - b;
6865 else
6866 temp += b - a;
6867 }
6868
6869 return build_int_cst (long_integer_type_node, temp);
6870 }
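/* Worked example: PERR sums the absolute differences of the eight
   byte lanes, so __builtin_alpha_perr (x, x) folds to 0 and
   __builtin_alpha_perr (0x0100, 0x0003) folds to
   |0x00 - 0x03| + |0x01 - 0x00| = 4.  */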
6871
6872 static tree
6873 alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
6874 {
6875 unsigned HOST_WIDE_INT temp;
6876
6877 if (op_const == 0)
6878 return NULL;
6879
6880 temp = opint[0] & 0xff;
6881 temp |= (opint[0] >> 24) & 0xff00;
6882
6883 return build_int_cst (long_integer_type_node, temp);
6884 }
6885
6886 static tree
6887 alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
6888 {
6889 unsigned HOST_WIDE_INT temp;
6890
6891 if (op_const == 0)
6892 return NULL;
6893
6894 temp = opint[0] & 0xff;
6895 temp |= (opint[0] >> 8) & 0xff00;
6896 temp |= (opint[0] >> 16) & 0xff0000;
6897 temp |= (opint[0] >> 24) & 0xff000000;
6898
6899 return build_int_cst (long_integer_type_node, temp);
6900 }
6901
6902 static tree
6903 alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
6904 {
6905 unsigned HOST_WIDE_INT temp;
6906
6907 if (op_const == 0)
6908 return NULL;
6909
6910 temp = opint[0] & 0xff;
6911 temp |= (opint[0] & 0xff00) << 24;
6912
6913 return build_int_cst (long_integer_type_node, temp);
6914 }
6915
6916 static tree
6917 alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
6918 {
6919 unsigned HOST_WIDE_INT temp;
6920
6921 if (op_const == 0)
6922 return NULL;
6923
6924 temp = opint[0] & 0xff;
6925 temp |= (opint[0] & 0x0000ff00) << 8;
6926 temp |= (opint[0] & 0x00ff0000) << 16;
6927 temp |= (opint[0] & 0xff000000) << 24;
6928
6929 return build_int_cst (long_integer_type_node, temp);
6930 }
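/* Examples for the pack/unpack folds above: PKLB gathers the low
   bytes of the two longwords, so __builtin_alpha_pklb
   (0x0000004400000011) folds to 0x4411, and UNPKBL performs the
   inverse scatter, folding __builtin_alpha_unpkbl (0x4411) back to
   0x0000004400000011.  */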
6931
6932 static tree
6933 alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
6934 {
6935 unsigned HOST_WIDE_INT temp;
6936
6937 if (op_const == 0)
6938 return NULL;
6939
6940 if (opint[0] == 0)
6941 temp = 64;
6942 else
6943 temp = exact_log2 (opint[0] & -opint[0]);
6944
6945 return build_int_cst (long_integer_type_node, temp);
6946 }
6947
6948 static tree
6949 alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
6950 {
6951 unsigned HOST_WIDE_INT temp;
6952
6953 if (op_const == 0)
6954 return NULL;
6955
6956 if (opint[0] == 0)
6957 temp = 64;
6958 else
6959 temp = 64 - floor_log2 (opint[0]) - 1;
6960
6961 return build_int_cst (long_integer_type_node, temp);
6962 }
6963
6964 static tree
6965 alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
6966 {
6967 unsigned HOST_WIDE_INT temp, op;
6968
6969 if (op_const == 0)
6970 return NULL;
6971
6972 op = opint[0];
6973 temp = 0;
6974 while (op)
6975 temp++, op &= op - 1;
6976
6977 return build_int_cst (long_integer_type_node, temp);
6978 }
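/* Examples for the CIX folds above: __builtin_alpha_cttz (0x8) folds
   to 3, __builtin_alpha_ctlz (1) folds to 63, and
   __builtin_alpha_ctpop (0xf0) folds to 4; the loop above clears the
   lowest set bit on each iteration, so it runs once per set bit.  */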
6979
6980 /* Fold one of our builtin functions. */
6981
6982 static tree
6983 alpha_fold_builtin (tree fndecl, tree arglist, bool ignore ATTRIBUTE_UNUSED)
6984 {
6985 tree op[MAX_ARGS], t;
6986 unsigned HOST_WIDE_INT opint[MAX_ARGS];
6987 long op_const = 0, arity = 0;
6988
6989 for (t = arglist; t ; t = TREE_CHAIN (t), ++arity)
6990 {
6991 tree arg = TREE_VALUE (t);
6992 if (arg == error_mark_node)
6993 return NULL;
6994 if (arity >= MAX_ARGS)
6995 return NULL;
6996
6997 op[arity] = arg;
6998 opint[arity] = 0;
6999 if (TREE_CODE (arg) == INTEGER_CST)
7000 {
7001 op_const |= 1L << arity;
7002 opint[arity] = int_cst_value (arg);
7003 }
7004 }
7005
7006 switch (DECL_FUNCTION_CODE (fndecl))
7007 {
7008 case ALPHA_BUILTIN_CMPBGE:
7009 return alpha_fold_builtin_cmpbge (opint, op_const);
7010
7011 case ALPHA_BUILTIN_EXTBL:
7012 return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
7013 case ALPHA_BUILTIN_EXTWL:
7014 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
7015 case ALPHA_BUILTIN_EXTLL:
7016 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
7017 case ALPHA_BUILTIN_EXTQL:
7018 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
7019 case ALPHA_BUILTIN_EXTWH:
7020 return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
7021 case ALPHA_BUILTIN_EXTLH:
7022 return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
7023 case ALPHA_BUILTIN_EXTQH:
7024 return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
7025
7026 case ALPHA_BUILTIN_INSBL:
7027 return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
7028 case ALPHA_BUILTIN_INSWL:
7029 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
7030 case ALPHA_BUILTIN_INSLL:
7031 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
7032 case ALPHA_BUILTIN_INSQL:
7033 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
7034 case ALPHA_BUILTIN_INSWH:
7035 return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
7036 case ALPHA_BUILTIN_INSLH:
7037 return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
7038 case ALPHA_BUILTIN_INSQH:
7039 return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
7040
7041 case ALPHA_BUILTIN_MSKBL:
7042 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
7043 case ALPHA_BUILTIN_MSKWL:
7044 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
7045 case ALPHA_BUILTIN_MSKLL:
7046 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
7047 case ALPHA_BUILTIN_MSKQL:
7048 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
7049 case ALPHA_BUILTIN_MSKWH:
7050 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
7051 case ALPHA_BUILTIN_MSKLH:
7052 return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
7053 case ALPHA_BUILTIN_MSKQH:
7054 return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
7055
7056 case ALPHA_BUILTIN_UMULH:
7057 return alpha_fold_builtin_umulh (opint, op_const);
7058
7059 case ALPHA_BUILTIN_ZAP:
7060 opint[1] ^= 0xff;
7061 /* FALLTHRU */
7062 case ALPHA_BUILTIN_ZAPNOT:
7063 return alpha_fold_builtin_zapnot (op, opint, op_const);
7064
7065 case ALPHA_BUILTIN_MINUB8:
7066 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
7067 case ALPHA_BUILTIN_MINSB8:
7068 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
7069 case ALPHA_BUILTIN_MINUW4:
7070 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
7071 case ALPHA_BUILTIN_MINSW4:
7072 return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
7073 case ALPHA_BUILTIN_MAXUB8:
7074 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
7075 case ALPHA_BUILTIN_MAXSB8:
7076 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
7077 case ALPHA_BUILTIN_MAXUW4:
7078 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
7079 case ALPHA_BUILTIN_MAXSW4:
7080 return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
7081
7082 case ALPHA_BUILTIN_PERR:
7083 return alpha_fold_builtin_perr (opint, op_const);
7084 case ALPHA_BUILTIN_PKLB:
7085 return alpha_fold_builtin_pklb (opint, op_const);
7086 case ALPHA_BUILTIN_PKWB:
7087 return alpha_fold_builtin_pkwb (opint, op_const);
7088 case ALPHA_BUILTIN_UNPKBL:
7089 return alpha_fold_builtin_unpkbl (opint, op_const);
7090 case ALPHA_BUILTIN_UNPKBW:
7091 return alpha_fold_builtin_unpkbw (opint, op_const);
7092
7093 case ALPHA_BUILTIN_CTTZ:
7094 return alpha_fold_builtin_cttz (opint, op_const);
7095 case ALPHA_BUILTIN_CTLZ:
7096 return alpha_fold_builtin_ctlz (opint, op_const);
7097 case ALPHA_BUILTIN_CTPOP:
7098 return alpha_fold_builtin_ctpop (opint, op_const);
7099
7100 case ALPHA_BUILTIN_AMASK:
7101 case ALPHA_BUILTIN_IMPLVER:
7102 case ALPHA_BUILTIN_RPCC:
7103 case ALPHA_BUILTIN_THREAD_POINTER:
7104 case ALPHA_BUILTIN_SET_THREAD_POINTER:
7105 /* None of these are foldable at compile-time. */
7106 default:
7107 return NULL;
7108 }
7109 }
7110 \f
7111 /* This page contains routines that are used to determine what the function
7112 prologue and epilogue code will do and write them out. */
7113
7114 /* Compute the size of the save area in the stack. */
7115
7116 /* These variables are used for communication between the following functions.
7117 They indicate various things about the current function being compiled
7118 that are used to tell what kind of prologue, epilogue and procedure
7119 descriptor to generate. */
7120
7121 /* The kind of procedure (null, register frame or stack frame) we need. */
7122 enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
7123 static enum alpha_procedure_types alpha_procedure_type;
7124
7125 /* Register number (either FP or SP) that is used to unwind the frame. */
7126 static int vms_unwind_regno;
7127
7128 /* Register number used to save FP. We need not have one for RA since
7129 we don't modify it for register procedures. This is only defined
7130 for register frame procedures. */
7131 static int vms_save_fp_regno;
7132
7133 /* Register number used to reference objects off our PV. */
7134 static int vms_base_regno;
7135
7136 /* Compute register masks for saved registers. */
7137
7138 static void
7139 alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
7140 {
7141 unsigned long imask = 0;
7142 unsigned long fmask = 0;
7143 unsigned int i;
7144
7145 /* When outputting a thunk, we don't have valid register life info,
7146 but assemble_start_function wants to output .frame and .mask
7147 directives. */
7148 if (cfun->is_thunk)
7149 {
7150 *imaskP = 0;
7151 *fmaskP = 0;
7152 return;
7153 }
7154
7155 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7156 imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
7157
7158 /* One for every register we have to save. */
7159 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7160 if (! fixed_regs[i] && ! call_used_regs[i]
7161 && df_regs_ever_live_p (i) && i != REG_RA
7162 && (!TARGET_ABI_UNICOSMK || i != HARD_FRAME_POINTER_REGNUM))
7163 {
7164 if (i < 32)
7165 imask |= (1UL << i);
7166 else
7167 fmask |= (1UL << (i - 32));
7168 }
7169
7170 /* We need to restore these for the handler. */
7171 if (crtl->calls_eh_return)
7172 {
7173 for (i = 0; ; ++i)
7174 {
7175 unsigned regno = EH_RETURN_DATA_REGNO (i);
7176 if (regno == INVALID_REGNUM)
7177 break;
7178 imask |= 1UL << regno;
7179 }
7180 }
7181
7182 /* If any register spilled, then spill the return address also. */
7183 /* ??? This is required by the Digital stack unwind specification
7184 and isn't needed if we're doing Dwarf2 unwinding. */
7185 if (imask || fmask || alpha_ra_ever_killed ())
7186 imask |= (1UL << REG_RA);
7187
7188 *imaskP = imask;
7189 *fmaskP = fmask;
7190 }
7191
7192 int
7193 alpha_sa_size (void)
7194 {
7195 unsigned long mask[2];
7196 int sa_size = 0;
7197 int i, j;
7198
7199 alpha_sa_mask (&mask[0], &mask[1]);
7200
7201 if (TARGET_ABI_UNICOSMK)
7202 {
7203 if (mask[0] || mask[1])
7204 sa_size = 14;
7205 }
7206 else
7207 {
7208 for (j = 0; j < 2; ++j)
7209 for (i = 0; i < 32; ++i)
7210 if ((mask[j] >> i) & 1)
7211 sa_size++;
7212 }
7213
7214 if (TARGET_ABI_UNICOSMK)
7215 {
7216 /* We might not need to generate a frame if we don't make any calls
7217 (including calls to __T3E_MISMATCH if this is a vararg function),
7218 don't have any local variables which require stack slots, don't
7219 use alloca and have not determined that we need a frame for other
7220 reasons. */
7221
7222 alpha_procedure_type
7223 = (sa_size || get_frame_size() != 0
7224 || crtl->outgoing_args_size
7225 || cfun->stdarg || cfun->calls_alloca
7226 || frame_pointer_needed)
7227 ? PT_STACK : PT_REGISTER;
7228
7229 /* Always reserve space for saving callee-saved registers if we
7230 need a frame as required by the calling convention. */
7231 if (alpha_procedure_type == PT_STACK)
7232 sa_size = 14;
7233 }
7234 else if (TARGET_ABI_OPEN_VMS)
7235 {
7236 /* Start by assuming we can use a register procedure if we don't
7237 make any calls (REG_RA not used) and don't need to save any
7238 registers, and a stack procedure if we do. */
7239 if ((mask[0] >> REG_RA) & 1)
7240 alpha_procedure_type = PT_STACK;
7241 else if (get_frame_size() != 0)
7242 alpha_procedure_type = PT_REGISTER;
7243 else
7244 alpha_procedure_type = PT_NULL;
7245
7246 /* Don't reserve space for saving FP & RA yet. Do that later after we've
7247 made the final decision on stack procedure vs register procedure. */
7248 if (alpha_procedure_type == PT_STACK)
7249 sa_size -= 2;
7250
7251 /* Decide whether to refer to objects off our PV via FP or PV.
7252 If we need FP for something else or if we receive a nonlocal
7253 goto (which expects PV to contain the value), we must use PV.
7254 Otherwise, start by assuming we can use FP. */
7255
7256 vms_base_regno
7257 = (frame_pointer_needed
7258 || cfun->has_nonlocal_label
7259 || alpha_procedure_type == PT_STACK
7260 || crtl->outgoing_args_size)
7261 ? REG_PV : HARD_FRAME_POINTER_REGNUM;
7262
7263 /* If we want to copy PV into FP, we need to find some register
7264 in which to save FP. */
7265
7266 vms_save_fp_regno = -1;
7267 if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
7268 for (i = 0; i < 32; i++)
7269 if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
7270 vms_save_fp_regno = i;
7271
7272 if (vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
7273 vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
7274 else if (alpha_procedure_type == PT_NULL)
7275 vms_base_regno = REG_PV;
7276
7277 /* Stack unwinding should be done via FP unless we use it for PV. */
7278 vms_unwind_regno = (vms_base_regno == REG_PV
7279 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
7280
7281 /* If this is a stack procedure, allow space for saving FP and RA. */
7282 if (alpha_procedure_type == PT_STACK)
7283 sa_size += 2;
7284 }
7285 else
7286 {
7287 /* Our size must be even (multiple of 16 bytes). */
7288 if (sa_size & 1)
7289 sa_size++;
7290 }
7291
7292 return sa_size * 8;
7293 }
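/* Illustrative example: on OSF, a function that must save $9, $10 and
   the return address has three registers in the masks; the count is
   padded to four so the save area stays a multiple of 16 bytes, and
   alpha_sa_size returns 32.  */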
7294
7295 /* Define the offset between two registers, one to be eliminated,
7296 and the other its replacement, at the start of a routine. */
7297
7298 HOST_WIDE_INT
7299 alpha_initial_elimination_offset (unsigned int from,
7300 unsigned int to ATTRIBUTE_UNUSED)
7301 {
7302 HOST_WIDE_INT ret;
7303
7304 ret = alpha_sa_size ();
7305 ret += ALPHA_ROUND (crtl->outgoing_args_size);
7306
7307 switch (from)
7308 {
7309 case FRAME_POINTER_REGNUM:
7310 break;
7311
7312 case ARG_POINTER_REGNUM:
7313 ret += (ALPHA_ROUND (get_frame_size ()
7314 + crtl->args.pretend_args_size)
7315 - crtl->args.pretend_args_size);
7316 break;
7317
7318 default:
7319 gcc_unreachable ();
7320 }
7321
7322 return ret;
7323 }
7324
7325 int
7326 alpha_pv_save_size (void)
7327 {
7328 alpha_sa_size ();
7329 return alpha_procedure_type == PT_STACK ? 8 : 0;
7330 }
7331
7332 int
7333 alpha_using_fp (void)
7334 {
7335 alpha_sa_size ();
7336 return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
7337 }
7338
7339 #if TARGET_ABI_OPEN_VMS
7340
7341 static const struct attribute_spec vms_attribute_table[] =
7342 {
7343 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7344 { "overlaid", 0, 0, true, false, false, NULL },
7345 { "global", 0, 0, true, false, false, NULL },
7346 { "initialize", 0, 0, true, false, false, NULL },
7347 { NULL, 0, 0, false, false, false, NULL }
7348 };
7349
7350 #endif
7351
7352 static int
7353 find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
7354 {
7355 return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
7356 }
7357
7358 int
7359 alpha_find_lo_sum_using_gp (rtx insn)
7360 {
7361 return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
7362 }
7363
7364 static int
7365 alpha_does_function_need_gp (void)
7366 {
7367 rtx insn;
7368
7369 /* The GP being variable is an OSF abi thing. */
7370 if (! TARGET_ABI_OSF)
7371 return 0;
7372
7373 /* We need the gp to load the address of __mcount. */
7374 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7375 return 1;
7376
7377 /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
7378 if (cfun->is_thunk)
7379 return 1;
7380
7381 /* The nonlocal receiver pattern assumes that the gp is valid for
7382 the nested function. Reasonable because it's almost always set
7383 correctly already. For the cases where that's wrong, make sure
7384 the nested function loads its gp on entry. */
7385 if (crtl->has_nonlocal_goto)
7386 return 1;
7387
7388 /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
7389 Even if we are a static function, we still need to do this in case
7390 our address is taken and passed to something like qsort. */
7391
7392 push_topmost_sequence ();
7393 insn = get_insns ();
7394 pop_topmost_sequence ();
7395
7396 for (; insn; insn = NEXT_INSN (insn))
7397 if (INSN_P (insn)
7398 && ! JUMP_TABLE_DATA_P (insn)
7399 && GET_CODE (PATTERN (insn)) != USE
7400 && GET_CODE (PATTERN (insn)) != CLOBBER
7401 && get_attr_usegp (insn))
7402 return 1;
7403
7404 return 0;
7405 }
7406
7407 \f
7408 /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
7409 sequences. */
7410
7411 static rtx
7412 set_frame_related_p (void)
7413 {
7414 rtx seq = get_insns ();
7415 rtx insn;
7416
7417 end_sequence ();
7418
7419 if (!seq)
7420 return NULL_RTX;
7421
7422 if (INSN_P (seq))
7423 {
7424 insn = seq;
7425 while (insn != NULL_RTX)
7426 {
7427 RTX_FRAME_RELATED_P (insn) = 1;
7428 insn = NEXT_INSN (insn);
7429 }
7430 seq = emit_insn (seq);
7431 }
7432 else
7433 {
7434 seq = emit_insn (seq);
7435 RTX_FRAME_RELATED_P (seq) = 1;
7436 }
7437 return seq;
7438 }
7439
7440 #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
7441
7442 /* Generates a store with the proper unwind info attached. VALUE is
7443 stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
7444 contains SP+FRAME_BIAS, and that is the unwind info that should be
7445 generated. If FRAME_REG != VALUE, then VALUE is being stored on
7446 behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
7447
7448 static void
7449 emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
7450 HOST_WIDE_INT base_ofs, rtx frame_reg)
7451 {
7452 rtx addr, mem, insn;
7453
7454 addr = plus_constant (base_reg, base_ofs);
7455 mem = gen_rtx_MEM (DImode, addr);
7456 set_mem_alias_set (mem, alpha_sr_alias_set);
7457
7458 insn = emit_move_insn (mem, value);
7459 RTX_FRAME_RELATED_P (insn) = 1;
7460
7461 if (frame_bias || value != frame_reg)
7462 {
7463 if (frame_bias)
7464 {
7465 addr = plus_constant (stack_pointer_rtx, frame_bias + base_ofs);
7466 mem = gen_rtx_MEM (DImode, addr);
7467 }
7468
7469 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
7470 gen_rtx_SET (VOIDmode, mem, frame_reg));
7471 }
7472 }
7473
7474 static void
7475 emit_frame_store (unsigned int regno, rtx base_reg,
7476 HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
7477 {
7478 rtx reg = gen_rtx_REG (DImode, regno);
7479 emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
7480 }
7481
7482 /* Write function prologue. */
7483
7484 /* On VMS we have two kinds of functions:
7485 
7486 - stack frame (PROC_STACK)
7487 these are 'normal' functions with local vars and which
7488 call other functions
7489 - register frame (PROC_REGISTER)
7490 keeps all data in registers, needs no stack
7491 
7492 We must pass this to the assembler so it can generate the
7493 proper pdsc (procedure descriptor).
7494 This is done with the '.pdesc' directive.
7495 
7496 On non-VMS targets, we don't really differentiate between the two, as
7497 we can simply allocate stack without saving registers. */
7498
7499 void
7500 alpha_expand_prologue (void)
7501 {
7502 /* Registers to save. */
7503 unsigned long imask = 0;
7504 unsigned long fmask = 0;
7505 /* Stack space needed for pushing registers clobbered by us. */
7506 HOST_WIDE_INT sa_size;
7507 /* Complete stack size needed. */
7508 HOST_WIDE_INT frame_size;
7509 /* Offset from base reg to register save area. */
7510 HOST_WIDE_INT reg_offset;
7511 rtx sa_reg;
7512 int i;
7513
7514 sa_size = alpha_sa_size ();
7515
7516 frame_size = get_frame_size ();
7517 if (TARGET_ABI_OPEN_VMS)
7518 frame_size = ALPHA_ROUND (sa_size
7519 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7520 + frame_size
7521 + crtl->args.pretend_args_size);
7522 else if (TARGET_ABI_UNICOSMK)
7523 /* We have to allocate space for the DSIB if we generate a frame. */
7524 frame_size = ALPHA_ROUND (sa_size
7525 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7526 + ALPHA_ROUND (frame_size
7527 + crtl->outgoing_args_size);
7528 else
7529 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7530 + sa_size
7531 + ALPHA_ROUND (frame_size
7532 + crtl->args.pretend_args_size));
7533
7534 if (TARGET_ABI_OPEN_VMS)
7535 reg_offset = 8;
7536 else
7537 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7538
7539 alpha_sa_mask (&imask, &fmask);
7540
7541 /* Emit an insn to reload GP, if needed. */
7542 if (TARGET_ABI_OSF)
7543 {
7544 alpha_function_needs_gp = alpha_does_function_need_gp ();
7545 if (alpha_function_needs_gp)
7546 emit_insn (gen_prologue_ldgp ());
7547 }
7548
7549 /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
7550 the call to mcount ourselves, rather than having the linker do it
7551 magically in response to -pg. Since _mcount has special linkage,
7552 don't represent the call as a call. */
7553 if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
7554 emit_insn (gen_prologue_mcount ());
7555
7556 if (TARGET_ABI_UNICOSMK)
7557 unicosmk_gen_dsib (&imask);
7558
7559 /* Adjust the stack by the frame size. If the frame size is > 4096
7560 bytes, we need to be sure we probe somewhere in the first and last
7561 4096 bytes (we can probably get away without the latter test) and
7562 every 8192 bytes in between. If the frame size is > 32768, we
7563 do this in a loop. Otherwise, we generate the explicit probe
7564 instructions.
7565
7566 Note that we are only allowed to adjust sp once in the prologue. */
7567
7568 if (frame_size <= 32768)
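/* Worked example (illustrative): with frame_size == 20000 and an
   empty save area, the small-frame path below probes at sp-4096 and
   sp-12288, adds a final probe at sp-20000 because the last probe
   would otherwise sit more than 4096 bytes above the new sp, and
   then drops sp by 20000 in a single add.  Frames larger than 32768
   bytes get the same coverage from the probe loop, using $22/$23 as
   scratch.  */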
7569 {
7570 if (frame_size > 4096)
7571 {
7572 int probed;
7573
7574 for (probed = 4096; probed < frame_size; probed += 8192)
7575 emit_insn (gen_probe_stack (GEN_INT (TARGET_ABI_UNICOSMK
7576 ? -probed + 64
7577 : -probed)));
7578
7579 /* We only have to do this probe if we aren't saving registers. */
7580 if (sa_size == 0 && frame_size > probed - 4096)
7581 emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
7582 }
7583
7584 if (frame_size != 0)
7585 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
7586 GEN_INT (TARGET_ABI_UNICOSMK
7587 ? -frame_size + 64
7588 : -frame_size))));
7589 }
7590 else
7591 {
7592 /* Here we generate code to set R22 to SP + 4096 and set R23 to the
7593 number of 8192 byte blocks to probe. We then probe each block
7594 in the loop and then set SP to the proper location. If the
7595 amount remaining is > 4096, we have to do one more probe if we
7596 are not saving any registers. */
7597
7598 HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
7599 HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
7600 rtx ptr = gen_rtx_REG (DImode, 22);
7601 rtx count = gen_rtx_REG (DImode, 23);
7602 rtx seq;
7603
7604 emit_move_insn (count, GEN_INT (blocks));
7605 emit_insn (gen_adddi3 (ptr, stack_pointer_rtx,
7606 GEN_INT (TARGET_ABI_UNICOSMK ? 4096 - 64 : 4096)));
7607
7608 /* Because of the difficulty in emitting a new basic block this
7609 late in the compilation, generate the loop as a single insn. */
7610 emit_insn (gen_prologue_stack_probe_loop (count, ptr));
7611
7612 if (leftover > 4096 && sa_size == 0)
7613 {
7614 rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
7615 MEM_VOLATILE_P (last) = 1;
7616 emit_move_insn (last, const0_rtx);
7617 }
7618
7619 if (TARGET_ABI_WINDOWS_NT)
7620 {
7621 /* For NT stack unwind (done by 'reverse execution'), it's
7622 not OK to take the result of a loop, even though the value
7623 is already in ptr, so we reload it via a single operation
7624 and subtract it from sp.
7625
7626 Yes, that's correct -- we have to reload the whole constant
7627 into a temporary via ldah+lda then subtract from sp. */
7628
7629 HOST_WIDE_INT lo, hi;
7630 lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
7631 hi = frame_size - lo;
7632
7633 emit_move_insn (ptr, GEN_INT (hi));
7634 emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
7635 seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
7636 ptr));
7637 }
7638 else
7639 {
7640 seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
7641 GEN_INT (-leftover)));
7642 }
7643
7644 /* This alternative is special, because the DWARF code cannot
7645 possibly intuit through the loop above. So we invent this
7646 note for it to look at instead. */
7647 RTX_FRAME_RELATED_P (seq) = 1;
7648 add_reg_note (seq, REG_FRAME_RELATED_EXPR,
7649 gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7650 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
7651 GEN_INT (TARGET_ABI_UNICOSMK
7652 ? -frame_size + 64
7653 : -frame_size))));
7654 }
7655
7656 if (!TARGET_ABI_UNICOSMK)
7657 {
7658 HOST_WIDE_INT sa_bias = 0;
7659
7660 /* Cope with very large offsets to the register save area. */
7661 sa_reg = stack_pointer_rtx;
7662 if (reg_offset + sa_size > 0x8000)
7663 {
7664 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
7665 rtx sa_bias_rtx;
7666
7667 if (low + sa_size <= 0x8000)
7668 sa_bias = reg_offset - low, reg_offset = low;
7669 else
7670 sa_bias = reg_offset, reg_offset = 0;
7671
7672 sa_reg = gen_rtx_REG (DImode, 24);
7673 sa_bias_rtx = GEN_INT (sa_bias);
7674
7675 if (add_operand (sa_bias_rtx, DImode))
7676 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
7677 else
7678 {
7679 emit_move_insn (sa_reg, sa_bias_rtx);
7680 emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
7681 }
7682 }
7683
7684 /* Save regs in stack order. Beginning with VMS PV. */
7685 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
7686 emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
7687
7688 /* Save register RA next. */
7689 if (imask & (1UL << REG_RA))
7690 {
7691 emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
7692 imask &= ~(1UL << REG_RA);
7693 reg_offset += 8;
7694 }
7695
7696 /* Now save any other registers required to be saved. */
7697 for (i = 0; i < 31; i++)
7698 if (imask & (1UL << i))
7699 {
7700 emit_frame_store (i, sa_reg, sa_bias, reg_offset);
7701 reg_offset += 8;
7702 }
7703
7704 for (i = 0; i < 31; i++)
7705 if (fmask & (1UL << i))
7706 {
7707 emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
7708 reg_offset += 8;
7709 }
7710 }
7711 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
7712 {
7713 /* The standard frame on the T3E includes space for saving registers.
7714 We just have to use it. We don't have to save the return address and
7715 the old frame pointer here - they are saved in the DSIB. */
7716
7717 reg_offset = -56;
7718 for (i = 9; i < 15; i++)
7719 if (imask & (1UL << i))
7720 {
7721 emit_frame_store (i, hard_frame_pointer_rtx, 0, reg_offset);
7722 reg_offset -= 8;
7723 }
7724 for (i = 2; i < 10; i++)
7725 if (fmask & (1UL << i))
7726 {
7727 emit_frame_store (i+32, hard_frame_pointer_rtx, 0, reg_offset);
7728 reg_offset -= 8;
7729 }
7730 }
7731
7732 if (TARGET_ABI_OPEN_VMS)
7733 {
7734 /* Register frame procedures save the fp. */
7735 if (alpha_procedure_type == PT_REGISTER)
7736 {
7737 rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
7738 hard_frame_pointer_rtx);
7739 add_reg_note (insn, REG_CFA_REGISTER, NULL);
7740 RTX_FRAME_RELATED_P (insn) = 1;
7741 }
7742
7743 if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
7744 emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
7745 gen_rtx_REG (DImode, REG_PV)));
7746
7747 if (alpha_procedure_type != PT_NULL
7748 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
7749 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7750
7751 /* If we have to allocate space for outgoing args, do it now. */
7752 if (crtl->outgoing_args_size != 0)
7753 {
7754 rtx seq
7755 = emit_move_insn (stack_pointer_rtx,
7756 plus_constant
7757 (hard_frame_pointer_rtx,
7758 - (ALPHA_ROUND
7759 (crtl->outgoing_args_size))));
7760
7761 /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
7762 if ! frame_pointer_needed. Setting the bit will change the CFA
7763 computation rule to use sp again, which would be wrong if we had
7764 frame_pointer_needed, as this means sp might move unpredictably
7765 later on.
7766
7767 Also, note that
7768 frame_pointer_needed
7769 => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
7770 and
7771 crtl->outgoing_args_size != 0
7772 => alpha_procedure_type != PT_NULL,
7773
7774 so when we are not setting the bit here, we are guaranteed to
7775 have emitted an FRP frame pointer update just before. */
7776 RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
7777 }
7778 }
7779 else if (!TARGET_ABI_UNICOSMK)
7780 {
7781 /* If we need a frame pointer, set it from the stack pointer. */
7782 if (frame_pointer_needed)
7783 {
7784 if (TARGET_CAN_FAULT_IN_PROLOGUE)
7785 FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
7786 else
7787 /* This must always be the last instruction in the
7788 prologue, thus we emit a special move + clobber. */
7789 FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
7790 stack_pointer_rtx, sa_reg)));
7791 }
7792 }
7793
7794 /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
7795 the prologue, for exception handling reasons, we cannot do this for
7796 any insn that might fault. We could prevent this for mems with a
7797 (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
7798 have to prevent all such scheduling with a blockage.
7799
7800 Linux, on the other hand, never bothered to implement OSF/1's
7801 exception handling, and so doesn't care about such things. Anyone
7802 planning to use dwarf2 frame-unwind info can also omit the blockage. */
7803
7804 if (! TARGET_CAN_FAULT_IN_PROLOGUE)
7805 emit_insn (gen_blockage ());
7806 }
7807
7808 /* Count the number of .file directives, so that .loc is up to date. */
7809 int num_source_filenames = 0;
7810
7811 /* Output the textual info surrounding the prologue. */
7812
7813 void
7814 alpha_start_function (FILE *file, const char *fnname,
7815 tree decl ATTRIBUTE_UNUSED)
7816 {
7817 unsigned long imask = 0;
7818 unsigned long fmask = 0;
7819 /* Stack space needed for pushing registers clobbered by us. */
7820 HOST_WIDE_INT sa_size;
7821 /* Complete stack size needed. */
7822 unsigned HOST_WIDE_INT frame_size;
7823 /* The maximum debuggable frame size (512 Kbytes using Tru64 as). */
7824 unsigned HOST_WIDE_INT max_frame_size = TARGET_ABI_OSF && !TARGET_GAS
7825 ? 524288
7826 : 1UL << 31;
7827 /* Offset from base reg to register save area. */
7828 HOST_WIDE_INT reg_offset;
7829 char *entry_label = (char *) alloca (strlen (fnname) + 6);
7830 int i;
7831
7832 /* Don't emit an extern directive for functions defined in the same file. */
7833 if (TARGET_ABI_UNICOSMK)
7834 {
7835 tree name_tree;
7836 name_tree = get_identifier (fnname);
7837 TREE_ASM_WRITTEN (name_tree) = 1;
7838 }
7839
7840 alpha_fnname = fnname;
7841 sa_size = alpha_sa_size ();
7842
7843 frame_size = get_frame_size ();
7844 if (TARGET_ABI_OPEN_VMS)
7845 frame_size = ALPHA_ROUND (sa_size
7846 + (alpha_procedure_type == PT_STACK ? 8 : 0)
7847 + frame_size
7848 + crtl->args.pretend_args_size);
7849 else if (TARGET_ABI_UNICOSMK)
7850 frame_size = ALPHA_ROUND (sa_size
7851 + (alpha_procedure_type == PT_STACK ? 48 : 0))
7852 + ALPHA_ROUND (frame_size
7853 + crtl->outgoing_args_size);
7854 else
7855 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
7856 + sa_size
7857 + ALPHA_ROUND (frame_size
7858 + crtl->args.pretend_args_size));
7859
7860 if (TARGET_ABI_OPEN_VMS)
7861 reg_offset = 8;
7862 else
7863 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
7864
7865 alpha_sa_mask (&imask, &fmask);
7866
7867 /* Ecoff can handle multiple .file directives, so put out file and lineno.
7868 We have to do that before the .ent directive as we cannot switch
7869 files within procedures with native ecoff because line numbers are
7870 linked to procedure descriptors.
7871 Outputting the lineno helps debugging of one-line functions as they
7872 would otherwise get no line number at all. Please note that we would
7873 like to put out last_linenum from final.c, but it is not accessible. */
7874
7875 if (write_symbols == SDB_DEBUG)
7876 {
7877 #ifdef ASM_OUTPUT_SOURCE_FILENAME
7878 ASM_OUTPUT_SOURCE_FILENAME (file,
7879 DECL_SOURCE_FILE (current_function_decl));
7880 #endif
7881 #ifdef SDB_OUTPUT_SOURCE_LINE
7882 if (debug_info_level != DINFO_LEVEL_TERSE)
7883 SDB_OUTPUT_SOURCE_LINE (file,
7884 DECL_SOURCE_LINE (current_function_decl));
7885 #endif
7886 }
7887
7888 /* Issue function start and label. */
7889 if (TARGET_ABI_OPEN_VMS
7890 || (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive))
7891 {
7892 fputs ("\t.ent ", file);
7893 assemble_name (file, fnname);
7894 putc ('\n', file);
7895
7896 /* If the function needs GP, we'll write the "..ng" label there.
7897 Otherwise, do it here. */
7898 if (TARGET_ABI_OSF
7899 && ! alpha_function_needs_gp
7900 && ! cfun->is_thunk)
7901 {
7902 putc ('$', file);
7903 assemble_name (file, fnname);
7904 fputs ("..ng:\n", file);
7905 }
7906 }
7907
7908 strcpy (entry_label, fnname);
7909 if (TARGET_ABI_OPEN_VMS)
7910 strcat (entry_label, "..en");
7911
7912 /* For public functions, the label must be globalized by appending an
7913 additional colon. */
7914 if (TARGET_ABI_UNICOSMK && TREE_PUBLIC (decl))
7915 strcat (entry_label, ":");
7916
7917 ASM_OUTPUT_LABEL (file, entry_label);
7918 inside_function = TRUE;
7919
7920 if (TARGET_ABI_OPEN_VMS)
7921 fprintf (file, "\t.base $%d\n", vms_base_regno);
7922
7923 if (!TARGET_ABI_OPEN_VMS && !TARGET_ABI_UNICOSMK && TARGET_IEEE_CONFORMANT
7924 && !flag_inhibit_size_directive)
7925 {
7926 /* Set flags in procedure descriptor to request IEEE-conformant
7927 math-library routines. The value we set it to is PDSC_EXC_IEEE
7928 (/usr/include/pdsc.h). */
7929 fputs ("\t.eflag 48\n", file);
7930 }
7931
7932 /* Set up offsets to alpha virtual arg/local debugging pointer. */
7933 alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
7934 alpha_arg_offset = -frame_size + 48;
7935
7936 /* Describe our frame. If the frame size is larger than an integer,
7937 print it as zero to avoid an assembler error. We won't be
7938 properly describing such a frame, but that's the best we can do. */
7939 if (TARGET_ABI_UNICOSMK)
7940 ;
7941 else if (TARGET_ABI_OPEN_VMS)
7942 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
7943 HOST_WIDE_INT_PRINT_DEC "\n",
7944 vms_unwind_regno,
7945 frame_size >= (1UL << 31) ? 0 : frame_size,
7946 reg_offset);
7947 else if (!flag_inhibit_size_directive)
7948 fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
7949 (frame_pointer_needed
7950 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
7951 frame_size >= max_frame_size ? 0 : frame_size,
7952 crtl->args.pretend_args_size);
7953
7954 /* Describe which registers were spilled. */
7955 if (TARGET_ABI_UNICOSMK)
7956 ;
7957 else if (TARGET_ABI_OPEN_VMS)
7958 {
7959 if (imask)
7960 /* ??? Does VMS care if mask contains ra? The old code didn't
7961 set it, so I don't set it here. */
7962 fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
7963 if (fmask)
7964 fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
7965 if (alpha_procedure_type == PT_REGISTER)
7966 fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
7967 }
7968 else if (!flag_inhibit_size_directive)
7969 {
7970 if (imask)
7971 {
7972 fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
7973 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7974
7975 for (i = 0; i < 32; ++i)
7976 if (imask & (1UL << i))
7977 reg_offset += 8;
7978 }
7979
7980 if (fmask)
7981 fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
7982 frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
7983 }
7984
7985 #if TARGET_ABI_OPEN_VMS
7986 /* Ifdef'ed because link_section is only available then. */
7987 switch_to_section (readonly_data_section);
7988 fprintf (file, "\t.align 3\n");
7989 assemble_name (file, fnname); fputs ("..na:\n", file);
7990 fputs ("\t.ascii \"", file);
7991 assemble_name (file, fnname);
7992 fputs ("\\0\"\n", file);
7993 alpha_need_linkage (fnname, 1);
7994 switch_to_section (text_section);
7995 #endif
7996 }
7997
7998 /* Emit the .prologue note at the scheduled end of the prologue. */
7999
8000 static void
8001 alpha_output_function_end_prologue (FILE *file)
8002 {
8003 if (TARGET_ABI_UNICOSMK)
8004 ;
8005 else if (TARGET_ABI_OPEN_VMS)
8006 fputs ("\t.prologue\n", file);
8007 else if (TARGET_ABI_WINDOWS_NT)
8008 fputs ("\t.prologue 0\n", file);
8009 else if (!flag_inhibit_size_directive)
8010 fprintf (file, "\t.prologue %d\n",
8011 alpha_function_needs_gp || cfun->is_thunk);
8012 }
8013
8014 /* Write function epilogue. */
8015
8016 void
8017 alpha_expand_epilogue (void)
8018 {
8019 /* Registers to restore. */
8020 unsigned long imask = 0;
8021 unsigned long fmask = 0;
8022 /* Stack space needed for pushing registers clobbered by us. */
8023 HOST_WIDE_INT sa_size;
8024 /* Complete stack size needed. */
8025 HOST_WIDE_INT frame_size;
8026 /* Offset from base reg to register save area. */
8027 HOST_WIDE_INT reg_offset;
8028 int fp_is_frame_pointer, fp_offset;
8029 rtx sa_reg, sa_reg_exp = NULL;
8030 rtx sp_adj1, sp_adj2, mem, reg, insn;
8031 rtx eh_ofs;
8032 rtx cfa_restores = NULL_RTX;
8033 int i;
8034
8035 sa_size = alpha_sa_size ();
8036
8037 frame_size = get_frame_size ();
8038 if (TARGET_ABI_OPEN_VMS)
8039 frame_size = ALPHA_ROUND (sa_size
8040 + (alpha_procedure_type == PT_STACK ? 8 : 0)
8041 + frame_size
8042 + crtl->args.pretend_args_size);
8043 else if (TARGET_ABI_UNICOSMK)
8044 frame_size = ALPHA_ROUND (sa_size
8045 + (alpha_procedure_type == PT_STACK ? 48 : 0))
8046 + ALPHA_ROUND (frame_size
8047 + crtl->outgoing_args_size);
8048 else
8049 frame_size = (ALPHA_ROUND (crtl->outgoing_args_size)
8050 + sa_size
8051 + ALPHA_ROUND (frame_size
8052 + crtl->args.pretend_args_size));
8053
8054 if (TARGET_ABI_OPEN_VMS)
8055 {
8056 if (alpha_procedure_type == PT_STACK)
8057 reg_offset = 8;
8058 else
8059 reg_offset = 0;
8060 }
8061 else
8062 reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
8063
8064 alpha_sa_mask (&imask, &fmask);
8065
8066 fp_is_frame_pointer
8067 = ((TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
8068 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed));
8069 fp_offset = 0;
8070 sa_reg = stack_pointer_rtx;
8071
8072 if (crtl->calls_eh_return)
8073 eh_ofs = EH_RETURN_STACKADJ_RTX;
8074 else
8075 eh_ofs = NULL_RTX;
8076
8077 if (!TARGET_ABI_UNICOSMK && sa_size)
8078 {
8079 /* If we have a frame pointer, restore SP from it. */
8080 if ((TARGET_ABI_OPEN_VMS
8081 && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
8082 || (!TARGET_ABI_OPEN_VMS && frame_pointer_needed))
8083 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
8084
8085 /* Cope with very large offsets to the register save area. */
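/* A note on the bias computation below: the XOR/subtract sign-extends
   the low 16 bits of reg_offset (the range a single lda can add).  For
   example, reg_offset == 0x9010 gives low == -0x6ff0; if low plus the
   save-area size is still reachable with 16-bit displacements, bias
   becomes 0x10000 and the saves are addressed as small offsets from
   sp + 0x10000.  */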
8086 if (reg_offset + sa_size > 0x8000)
8087 {
8088 int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
8089 HOST_WIDE_INT bias;
8090
8091 if (low + sa_size <= 0x8000)
8092 bias = reg_offset - low, reg_offset = low;
8093 else
8094 bias = reg_offset, reg_offset = 0;
8095
8096 sa_reg = gen_rtx_REG (DImode, 22);
8097 sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
8098
8099 emit_move_insn (sa_reg, sa_reg_exp);
8100 }
8101
8102 /* Restore registers in order, excepting a true frame pointer. */
8103
8104 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset));
8105 if (! eh_ofs)
8106 set_mem_alias_set (mem, alpha_sr_alias_set);
8107 reg = gen_rtx_REG (DImode, REG_RA);
8108 emit_move_insn (reg, mem);
8109 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8110
8111 reg_offset += 8;
8112 imask &= ~(1UL << REG_RA);
8113
8114 for (i = 0; i < 31; ++i)
8115 if (imask & (1UL << i))
8116 {
8117 if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
8118 fp_offset = reg_offset;
8119 else
8120 {
8121 mem = gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset));
8122 set_mem_alias_set (mem, alpha_sr_alias_set);
8123 reg = gen_rtx_REG (DImode, i);
8124 emit_move_insn (reg, mem);
8125 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8126 cfa_restores);
8127 }
8128 reg_offset += 8;
8129 }
8130
8131 for (i = 0; i < 31; ++i)
8132 if (fmask & (1UL << i))
8133 {
8134 mem = gen_rtx_MEM (DFmode, plus_constant(sa_reg, reg_offset));
8135 set_mem_alias_set (mem, alpha_sr_alias_set);
8136 reg = gen_rtx_REG (DFmode, i+32);
8137 emit_move_insn (reg, mem);
8138 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8139 reg_offset += 8;
8140 }
8141 }
8142 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type == PT_STACK)
8143 {
8144 /* Restore callee-saved integer and floating-point registers. */
8145
8146 reg_offset = -56;
8147
8148 for (i = 9; i < 15; i++)
8149 if (imask & (1UL << i))
8150 {
8151 mem = gen_rtx_MEM (DImode, plus_constant(hard_frame_pointer_rtx,
8152 reg_offset));
8153 set_mem_alias_set (mem, alpha_sr_alias_set);
8154 reg = gen_rtx_REG (DImode, i);
8155 emit_move_insn (reg, mem);
8156 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8157 reg_offset -= 8;
8158 }
8159
8160 for (i = 2; i < 10; i++)
8161 if (fmask & (1UL << i))
8162 {
8163 mem = gen_rtx_MEM (DFmode, plus_constant(hard_frame_pointer_rtx,
8164 reg_offset));
8165 set_mem_alias_set (mem, alpha_sr_alias_set);
8166 reg = gen_rtx_REG (DFmode, i+32);
8167 emit_move_insn (reg, mem);
8168 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8169 reg_offset -= 8;
8170 }
8171
8172 /* Restore the return address from the DSIB. */
8173 mem = gen_rtx_MEM (DImode, plus_constant (hard_frame_pointer_rtx, -8));
8174 set_mem_alias_set (mem, alpha_sr_alias_set);
8175 reg = gen_rtx_REG (DImode, REG_RA);
8176 emit_move_insn (reg, mem);
8177 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
8178 }
8179
8180 if (frame_size || eh_ofs)
8181 {
8182 sp_adj1 = stack_pointer_rtx;
8183
8184 if (eh_ofs)
8185 {
8186 sp_adj1 = gen_rtx_REG (DImode, 23);
8187 emit_move_insn (sp_adj1,
8188 gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
8189 }
8190
8191 /* If the stack size is large, begin computation into a temporary
8192 register so as not to interfere with a potential fp restore,
8193 which must be consecutive with an SP restore. */
8194 if (frame_size < 32768
8195 && ! (TARGET_ABI_UNICOSMK && cfun->calls_alloca))
8196 sp_adj2 = GEN_INT (frame_size);
8197 else if (TARGET_ABI_UNICOSMK)
8198 {
8199 sp_adj1 = gen_rtx_REG (DImode, 23);
8200 emit_move_insn (sp_adj1, hard_frame_pointer_rtx);
8201 sp_adj2 = const0_rtx;
8202 }
8203 else if (frame_size < 0x40007fffL)
8204 {
8205 int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
8206
8207 sp_adj2 = plus_constant (sp_adj1, frame_size - low);
8208 if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
8209 sp_adj1 = sa_reg;
8210 else
8211 {
8212 sp_adj1 = gen_rtx_REG (DImode, 23);
8213 emit_move_insn (sp_adj1, sp_adj2);
8214 }
8215 sp_adj2 = GEN_INT (low);
8216 }
8217 else
8218 {
8219 rtx tmp = gen_rtx_REG (DImode, 23);
8220 sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
8221 if (!sp_adj2)
8222 {
8223 /* We can't drop new things to memory this late, as far as we know,
8224 so build it up by pieces. */
8225 sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
8226 -(frame_size < 0));
8227 gcc_assert (sp_adj2);
8228 }
8229 }
8230
8231 /* From now on, things must be in order. So emit blockages. */
8232
8233 /* Restore the frame pointer. */
8234 if (TARGET_ABI_UNICOSMK)
8235 {
8236 emit_insn (gen_blockage ());
8237 mem = gen_rtx_MEM (DImode,
8238 plus_constant (hard_frame_pointer_rtx, -16));
8239 set_mem_alias_set (mem, alpha_sr_alias_set);
8240 emit_move_insn (hard_frame_pointer_rtx, mem);
8241 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8242 hard_frame_pointer_rtx, cfa_restores);
8243 }
8244 else if (fp_is_frame_pointer)
8245 {
8246 emit_insn (gen_blockage ());
8247 mem = gen_rtx_MEM (DImode, plus_constant (sa_reg, fp_offset));
8248 set_mem_alias_set (mem, alpha_sr_alias_set);
8249 emit_move_insn (hard_frame_pointer_rtx, mem);
8250 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8251 hard_frame_pointer_rtx, cfa_restores);
8252 }
8253 else if (TARGET_ABI_OPEN_VMS)
8254 {
8255 emit_insn (gen_blockage ());
8256 emit_move_insn (hard_frame_pointer_rtx,
8257 gen_rtx_REG (DImode, vms_save_fp_regno));
8258 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
8259 hard_frame_pointer_rtx, cfa_restores);
8260 }
8261
8262 /* Restore the stack pointer. */
8263 emit_insn (gen_blockage ());
8264 if (sp_adj2 == const0_rtx)
8265 insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
8266 else
8267 insn = emit_move_insn (stack_pointer_rtx,
8268 gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
8269 REG_NOTES (insn) = cfa_restores;
8270 add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
8271 RTX_FRAME_RELATED_P (insn) = 1;
8272 }
8273 else
8274 {
8275 gcc_assert (cfa_restores == NULL);
8276
8277 if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
8278 {
8279 emit_insn (gen_blockage ());
8280 insn = emit_move_insn (hard_frame_pointer_rtx,
8281 gen_rtx_REG (DImode, vms_save_fp_regno));
8282 add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8283 RTX_FRAME_RELATED_P (insn) = 1;
8284 }
8285 else if (TARGET_ABI_UNICOSMK && alpha_procedure_type != PT_STACK)
8286 {
8287 /* Decrement the frame pointer if the function does not have a
8288 frame. */
8289 emit_insn (gen_blockage ());
8290 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
8291 hard_frame_pointer_rtx, constm1_rtx));
8292 }
8293 }
8294 }
8295 \f
8296 /* Output the rest of the textual info surrounding the epilogue. */
8297
8298 void
8299 alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
8300 {
8301 rtx insn;
8302
8303 /* We output a nop after noreturn calls at the very end of the function to
8304 ensure that the return address always remains in the caller's code range,
8305 as not doing so might confuse unwinding engines. */
8306 insn = get_last_insn ();
8307 if (!INSN_P (insn))
8308 insn = prev_active_insn (insn);
8309 if (CALL_P (insn))
8310 output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
8311
8312 #if TARGET_ABI_OSF
8313 if (cfun->is_thunk)
8314 free_after_compilation (cfun);
8315 #endif
8316
8317 #if TARGET_ABI_OPEN_VMS
8318 alpha_write_linkage (file, fnname, decl);
8319 #endif
8320
8321 /* End the function. */
8322 if (!TARGET_ABI_UNICOSMK && !flag_inhibit_size_directive)
8323 {
8324 fputs ("\t.end ", file);
8325 assemble_name (file, fnname);
8326 putc ('\n', file);
8327 }
8328 inside_function = FALSE;
8329
8330 /* Output jump tables and the static subroutine information block. */
8331 if (TARGET_ABI_UNICOSMK)
8332 {
8333 unicosmk_output_ssib (file, fnname);
8334 unicosmk_output_deferred_case_vectors (file);
8335 }
8336 }
8337
8338 #if TARGET_ABI_OSF
8339 /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
8340
8341 In order to avoid the hordes of differences between generated code
8342 with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
8343 lots of code loading up large constants, generate rtl and emit it
8344 instead of going straight to text.
8345
8346 Not sure why this idea hasn't been explored before... */
8347
8348 static void
8349 alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8350 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8351 tree function)
8352 {
8353 HOST_WIDE_INT hi, lo;
8354 rtx this_rtx, insn, funexp;
8355
8356 gcc_assert (cfun->is_thunk);
8357
8358 /* We always require a valid GP. */
8359 emit_insn (gen_prologue_ldgp ());
8360 emit_note (NOTE_INSN_PROLOGUE_END);
8361
8362 /* Find the "this" pointer. If the function returns a structure,
8363 the structure return pointer is in $16. */
8364 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8365 this_rtx = gen_rtx_REG (Pmode, 17);
8366 else
8367 this_rtx = gen_rtx_REG (Pmode, 16);
8368
8369 /* Add DELTA. When possible we use ldah+lda. Otherwise load the
8370 entire constant for the add. */
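/* A worked example: for delta == 0x12348765, lo == -0x789b and
   hi == 0x12350000, so hi + lo == delta and the addition is done as an
   ldah of (hi >> 16) followed by an lda of lo.  When the 32-bit
   decomposition does not recover delta, the full constant is built up
   in $0 instead.  */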
8371 lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
8372 hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8373 if (hi + lo == delta)
8374 {
8375 if (hi)
8376 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
8377 if (lo)
8378 emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
8379 }
8380 else
8381 {
8382 rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
8383 delta, -(delta < 0));
8384 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8385 }
8386
8387 /* Add a delta stored in the vtable at VCALL_OFFSET. */
8388 if (vcall_offset)
8389 {
8390 rtx tmp, tmp2;
8391
8392 tmp = gen_rtx_REG (Pmode, 0);
8393 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
8394
8395 lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
8396 hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
8397 if (hi + lo == vcall_offset)
8398 {
8399 if (hi)
8400 emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
8401 }
8402 else
8403 {
8404 tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
8405 vcall_offset, -(vcall_offset < 0));
8406 emit_insn (gen_adddi3 (tmp, tmp, tmp2));
8407 lo = 0;
8408 }
8409 if (lo)
8410 tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
8411 else
8412 tmp2 = tmp;
8413 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
8414
8415 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
8416 }
8417
8418 /* Generate a tail call to the target function. */
8419 if (! TREE_USED (function))
8420 {
8421 assemble_external (function);
8422 TREE_USED (function) = 1;
8423 }
8424 funexp = XEXP (DECL_RTL (function), 0);
8425 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8426 insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
8427 SIBLING_CALL_P (insn) = 1;
8428
8429 /* Run just enough of rest_of_compilation to get the insns emitted.
8430 There's not really enough bulk here to make other passes such as
8431 instruction scheduling worthwhile. Note that use_thunk calls
8432 assemble_start_function and assemble_end_function. */
8433 insn = get_insns ();
8434 insn_locators_alloc ();
8435 shorten_branches (insn);
8436 final_start_function (insn, file, 1);
8437 final (insn, file, 1);
8438 final_end_function ();
8439 }
8440 #endif /* TARGET_ABI_OSF */
8441 \f
8442 /* Debugging support. */
8443
8444 #include "gstab.h"
8445
8446 /* Count the number of sdb-related labels generated (to find block
8447 start and end boundaries). */
8448
8449 int sdb_label_count = 0;
8450
8451 /* Name of the file containing the current function. */
8452
8453 static const char *current_function_file = "";
8454
8455 /* Offsets to alpha virtual arg/local debugging pointers. */
8456
8457 long alpha_arg_offset;
8458 long alpha_auto_offset;
8459 \f
8460 /* Emit a new filename to a stream. */
8461
8462 void
8463 alpha_output_filename (FILE *stream, const char *name)
8464 {
8465 static int first_time = TRUE;
8466
8467 if (first_time)
8468 {
8469 first_time = FALSE;
8470 ++num_source_filenames;
8471 current_function_file = name;
8472 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8473 output_quoted_string (stream, name);
8474 fprintf (stream, "\n");
8475 if (!TARGET_GAS && write_symbols == DBX_DEBUG)
8476 fprintf (stream, "\t#@stabs\n");
8477 }
8478
8479 else if (write_symbols == DBX_DEBUG)
8480 /* dbxout.c will emit an appropriate .stabs directive. */
8481 return;
8482
8483 else if (name != current_function_file
8484 && strcmp (name, current_function_file) != 0)
8485 {
8486 if (inside_function && ! TARGET_GAS)
8487 fprintf (stream, "\t#.file\t%d ", num_source_filenames);
8488 else
8489 {
8490 ++num_source_filenames;
8491 current_function_file = name;
8492 fprintf (stream, "\t.file\t%d ", num_source_filenames);
8493 }
8494
8495 output_quoted_string (stream, name);
8496 fprintf (stream, "\n");
8497 }
8498 }
8499 \f
8500 /* Structure to show the current status of registers and memory. */
8501
8502 struct shadow_summary
8503 {
8504 struct {
8505 unsigned int i : 31; /* Mask of int regs */
8506 unsigned int fp : 31; /* Mask of fp regs */
8507 unsigned int mem : 1; /* mem == imem | fpmem */
8508 } used, defd;
8509 };
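/* Register 31 of each bank ($31 and $f31) always reads as zero and is
   never tracked here; summarize_insn below skips those regnos, which is
   why 31-bit masks suffice.  */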
8510
8511 /* Summarize the effects of expression X on the machine. Update SUM, a pointer
8512 to the summary structure. SET is nonzero if the insn is setting the
8513 object, otherwise zero. */
8514
8515 static void
8516 summarize_insn (rtx x, struct shadow_summary *sum, int set)
8517 {
8518 const char *format_ptr;
8519 int i, j;
8520
8521 if (x == 0)
8522 return;
8523
8524 switch (GET_CODE (x))
8525 {
8526 /* ??? Note that this case would be incorrect if the Alpha had a
8527 ZERO_EXTRACT in SET_DEST. */
8528 case SET:
8529 summarize_insn (SET_SRC (x), sum, 0);
8530 summarize_insn (SET_DEST (x), sum, 1);
8531 break;
8532
8533 case CLOBBER:
8534 summarize_insn (XEXP (x, 0), sum, 1);
8535 break;
8536
8537 case USE:
8538 summarize_insn (XEXP (x, 0), sum, 0);
8539 break;
8540
8541 case ASM_OPERANDS:
8542 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
8543 summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
8544 break;
8545
8546 case PARALLEL:
8547 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
8548 summarize_insn (XVECEXP (x, 0, i), sum, 0);
8549 break;
8550
8551 case SUBREG:
8552 summarize_insn (SUBREG_REG (x), sum, 0);
8553 break;
8554
8555 case REG:
8556 {
8557 int regno = REGNO (x);
8558 unsigned long mask = ((unsigned long) 1) << (regno % 32);
8559
8560 if (regno == 31 || regno == 63)
8561 break;
8562
8563 if (set)
8564 {
8565 if (regno < 32)
8566 sum->defd.i |= mask;
8567 else
8568 sum->defd.fp |= mask;
8569 }
8570 else
8571 {
8572 if (regno < 32)
8573 sum->used.i |= mask;
8574 else
8575 sum->used.fp |= mask;
8576 }
8577 }
8578 break;
8579
8580 case MEM:
8581 if (set)
8582 sum->defd.mem = 1;
8583 else
8584 sum->used.mem = 1;
8585
8586 /* Find the regs used in memory address computation: */
8587 summarize_insn (XEXP (x, 0), sum, 0);
8588 break;
8589
8590 case CONST_INT: case CONST_DOUBLE:
8591 case SYMBOL_REF: case LABEL_REF: case CONST:
8592 case SCRATCH: case ASM_INPUT:
8593 break;
8594
8595 /* Handle common unary and binary ops for efficiency. */
8596 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
8597 case MOD: case UDIV: case UMOD: case AND: case IOR:
8598 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
8599 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
8600 case NE: case EQ: case GE: case GT: case LE:
8601 case LT: case GEU: case GTU: case LEU: case LTU:
8602 summarize_insn (XEXP (x, 0), sum, 0);
8603 summarize_insn (XEXP (x, 1), sum, 0);
8604 break;
8605
8606 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
8607 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
8608 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
8609 case SQRT: case FFS:
8610 summarize_insn (XEXP (x, 0), sum, 0);
8611 break;
8612
8613 default:
8614 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
8615 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8616 switch (format_ptr[i])
8617 {
8618 case 'e':
8619 summarize_insn (XEXP (x, i), sum, 0);
8620 break;
8621
8622 case 'E':
8623 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8624 summarize_insn (XVECEXP (x, i, j), sum, 0);
8625 break;
8626
8627 case 'i':
8628 break;
8629
8630 default:
8631 gcc_unreachable ();
8632 }
8633 }
8634 }
8635
8636 /* Ensure a sufficient number of `trapb' insns are in the code when
8637 the user requests code with a trap precision of functions or
8638 instructions.
8639
8640 In naive mode, when the user requests a trap-precision of
8641 "instruction", a trapb is needed after every instruction that may
8642 generate a trap. This ensures that the code is resumption safe but
8643 it is also slow.
8644
8645 When optimizations are turned on, we delay issuing a trapb as long
8646 as possible. In this context, a trap shadow is the sequence of
8647 instructions that starts with a (potentially) trap generating
8648 instruction and extends to the next trapb or call_pal instruction
8649 (but GCC never generates call_pal by itself). We can delay (and
8650 therefore sometimes omit) a trapb subject to the following
8651 conditions:
8652
8653 (a) On entry to the trap shadow, if any Alpha register or memory
8654 location contains a value that is used as an operand value by some
8655 instruction in the trap shadow (live on entry), then no instruction
8656 in the trap shadow may modify the register or memory location.
8657
8658 (b) Within the trap shadow, the computation of the base register
8659 for a memory load or store instruction may not involve using the
8660 result of an instruction that might generate an UNPREDICTABLE
8661 result.
8662
8663 (c) Within the trap shadow, no register may be used more than once
8664 as a destination register. (This is to make life easier for the
8665 trap-handler.)
8666
8667 (d) The trap shadow may not include any branch instructions. */
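/* As an illustrative sketch (not code this compiler emits verbatim), a
   trap shadow might look like:

	addt $f1,$f2,$f3	# may trap: shadow begins
	ldq $1,0($16)		# ok: address not derived from $f3
	addq $1,$2,$4		# ok: $4 written only once
	trapb			# shadow ends

   A branch, a second write to $4, or a load whose address depended on
   $f3 would force the trapb to be emitted earlier.  */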
8668
8669 static void
8670 alpha_handle_trap_shadows (void)
8671 {
8672 struct shadow_summary shadow;
8673 int trap_pending, exception_nesting;
8674 rtx i, n;
8675
8676 trap_pending = 0;
8677 exception_nesting = 0;
8678 shadow.used.i = 0;
8679 shadow.used.fp = 0;
8680 shadow.used.mem = 0;
8681 shadow.defd = shadow.used;
8682
8683 for (i = get_insns (); i ; i = NEXT_INSN (i))
8684 {
8685 if (NOTE_P (i))
8686 {
8687 switch (NOTE_KIND (i))
8688 {
8689 case NOTE_INSN_EH_REGION_BEG:
8690 exception_nesting++;
8691 if (trap_pending)
8692 goto close_shadow;
8693 break;
8694
8695 case NOTE_INSN_EH_REGION_END:
8696 exception_nesting--;
8697 if (trap_pending)
8698 goto close_shadow;
8699 break;
8700
8701 case NOTE_INSN_EPILOGUE_BEG:
8702 if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
8703 goto close_shadow;
8704 break;
8705 }
8706 }
8707 else if (trap_pending)
8708 {
8709 if (alpha_tp == ALPHA_TP_FUNC)
8710 {
8711 if (JUMP_P (i)
8712 && GET_CODE (PATTERN (i)) == RETURN)
8713 goto close_shadow;
8714 }
8715 else if (alpha_tp == ALPHA_TP_INSN)
8716 {
8717 if (optimize > 0)
8718 {
8719 struct shadow_summary sum;
8720
8721 sum.used.i = 0;
8722 sum.used.fp = 0;
8723 sum.used.mem = 0;
8724 sum.defd = sum.used;
8725
8726 switch (GET_CODE (i))
8727 {
8728 case INSN:
8729 /* Annoyingly, get_attr_trap will die on these. */
8730 if (GET_CODE (PATTERN (i)) == USE
8731 || GET_CODE (PATTERN (i)) == CLOBBER)
8732 break;
8733
8734 summarize_insn (PATTERN (i), &sum, 0);
8735
8736 if ((sum.defd.i & shadow.defd.i)
8737 || (sum.defd.fp & shadow.defd.fp))
8738 {
8739 /* (c) would be violated */
8740 goto close_shadow;
8741 }
8742
8743 /* Combine shadow with summary of current insn: */
8744 shadow.used.i |= sum.used.i;
8745 shadow.used.fp |= sum.used.fp;
8746 shadow.used.mem |= sum.used.mem;
8747 shadow.defd.i |= sum.defd.i;
8748 shadow.defd.fp |= sum.defd.fp;
8749 shadow.defd.mem |= sum.defd.mem;
8750
8751 if ((sum.defd.i & shadow.used.i)
8752 || (sum.defd.fp & shadow.used.fp)
8753 || (sum.defd.mem & shadow.used.mem))
8754 {
8755 /* (a) would be violated (also takes care of (b)) */
8756 gcc_assert (get_attr_trap (i) != TRAP_YES
8757 || (!(sum.defd.i & sum.used.i)
8758 && !(sum.defd.fp & sum.used.fp)));
8759
8760 goto close_shadow;
8761 }
8762 break;
8763
8764 case JUMP_INSN:
8765 case CALL_INSN:
8766 case CODE_LABEL:
8767 goto close_shadow;
8768
8769 default:
8770 gcc_unreachable ();
8771 }
8772 }
8773 else
8774 {
8775 close_shadow:
8776 n = emit_insn_before (gen_trapb (), i);
8777 PUT_MODE (n, TImode);
8778 PUT_MODE (i, TImode);
8779 trap_pending = 0;
8780 shadow.used.i = 0;
8781 shadow.used.fp = 0;
8782 shadow.used.mem = 0;
8783 shadow.defd = shadow.used;
8784 }
8785 }
8786 }
8787
8788 if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
8789 && NONJUMP_INSN_P (i)
8790 && GET_CODE (PATTERN (i)) != USE
8791 && GET_CODE (PATTERN (i)) != CLOBBER
8792 && get_attr_trap (i) == TRAP_YES)
8793 {
8794 if (optimize && !trap_pending)
8795 summarize_insn (PATTERN (i), &shadow, 0);
8796 trap_pending = 1;
8797 }
8798 }
8799 }
8800 \f
8801 /* Alpha can only issue instruction groups simultaneously if they are
8802 suitably aligned. This is very processor-specific. */
8803 /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
8804 that are marked "fake". These instructions do not exist on that target,
8805 but it is possible to see these insns with deranged combinations of
8806 command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
8807 choose a result at random. */
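/* The EV4 can dual-issue from an aligned 8-byte pair and the EV5 can
   quad-issue from an aligned 16-byte block, which is where the max_align
   values of 8 and 16 passed in from alpha_reorg below come from.  */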
8808
8809 enum alphaev4_pipe {
8810 EV4_STOP = 0,
8811 EV4_IB0 = 1,
8812 EV4_IB1 = 2,
8813 EV4_IBX = 4
8814 };
8815
8816 enum alphaev5_pipe {
8817 EV5_STOP = 0,
8818 EV5_NONE = 1,
8819 EV5_E01 = 2,
8820 EV5_E0 = 4,
8821 EV5_E1 = 8,
8822 EV5_FAM = 16,
8823 EV5_FA = 32,
8824 EV5_FM = 64
8825 };
8826
8827 static enum alphaev4_pipe
8828 alphaev4_insn_pipe (rtx insn)
8829 {
8830 if (recog_memoized (insn) < 0)
8831 return EV4_STOP;
8832 if (get_attr_length (insn) != 4)
8833 return EV4_STOP;
8834
8835 switch (get_attr_type (insn))
8836 {
8837 case TYPE_ILD:
8838 case TYPE_LDSYM:
8839 case TYPE_FLD:
8840 case TYPE_LD_L:
8841 return EV4_IBX;
8842
8843 case TYPE_IADD:
8844 case TYPE_ILOG:
8845 case TYPE_ICMOV:
8846 case TYPE_ICMP:
8847 case TYPE_FST:
8848 case TYPE_SHIFT:
8849 case TYPE_IMUL:
8850 case TYPE_FBR:
8851 case TYPE_MVI: /* fake */
8852 return EV4_IB0;
8853
8854 case TYPE_IST:
8855 case TYPE_MISC:
8856 case TYPE_IBR:
8857 case TYPE_JSR:
8858 case TYPE_CALLPAL:
8859 case TYPE_FCPYS:
8860 case TYPE_FCMOV:
8861 case TYPE_FADD:
8862 case TYPE_FDIV:
8863 case TYPE_FMUL:
8864 case TYPE_ST_C:
8865 case TYPE_MB:
8866 case TYPE_FSQRT: /* fake */
8867 case TYPE_FTOI: /* fake */
8868 case TYPE_ITOF: /* fake */
8869 return EV4_IB1;
8870
8871 default:
8872 gcc_unreachable ();
8873 }
8874 }
8875
8876 static enum alphaev5_pipe
8877 alphaev5_insn_pipe (rtx insn)
8878 {
8879 if (recog_memoized (insn) < 0)
8880 return EV5_STOP;
8881 if (get_attr_length (insn) != 4)
8882 return EV5_STOP;
8883
8884 switch (get_attr_type (insn))
8885 {
8886 case TYPE_ILD:
8887 case TYPE_FLD:
8888 case TYPE_LDSYM:
8889 case TYPE_IADD:
8890 case TYPE_ILOG:
8891 case TYPE_ICMOV:
8892 case TYPE_ICMP:
8893 return EV5_E01;
8894
8895 case TYPE_IST:
8896 case TYPE_FST:
8897 case TYPE_SHIFT:
8898 case TYPE_IMUL:
8899 case TYPE_MISC:
8900 case TYPE_MVI:
8901 case TYPE_LD_L:
8902 case TYPE_ST_C:
8903 case TYPE_MB:
8904 case TYPE_FTOI: /* fake */
8905 case TYPE_ITOF: /* fake */
8906 return EV5_E0;
8907
8908 case TYPE_IBR:
8909 case TYPE_JSR:
8910 case TYPE_CALLPAL:
8911 return EV5_E1;
8912
8913 case TYPE_FCPYS:
8914 return EV5_FAM;
8915
8916 case TYPE_FBR:
8917 case TYPE_FCMOV:
8918 case TYPE_FADD:
8919 case TYPE_FDIV:
8920 case TYPE_FSQRT: /* fake */
8921 return EV5_FA;
8922
8923 case TYPE_FMUL:
8924 return EV5_FM;
8925
8926 default:
8927 gcc_unreachable ();
8928 }
8929 }
8930
8931 /* IN_USE is a mask of the slots currently filled within the insn group.
8932 The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
8933 the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
8934
8935 LEN is, of course, the length of the group in bytes. */
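/* For example, an EV4_IBX insn followed by an EV4_IB0 insn leaves
   in_use == EV4_IB0|EV4_IBX after the first; the second then claims
   EV4_IB1 as well, on the assumption that the hardware swaps the first
   insn into IB1.  */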
8936
8937 static rtx
8938 alphaev4_next_group (rtx insn, int *pin_use, int *plen)
8939 {
8940 int len, in_use;
8941
8942 len = in_use = 0;
8943
8944 if (! INSN_P (insn)
8945 || GET_CODE (PATTERN (insn)) == CLOBBER
8946 || GET_CODE (PATTERN (insn)) == USE)
8947 goto next_and_done;
8948
8949 while (1)
8950 {
8951 enum alphaev4_pipe pipe;
8952
8953 pipe = alphaev4_insn_pipe (insn);
8954 switch (pipe)
8955 {
8956 case EV4_STOP:
8957 /* Force complex instructions to start new groups. */
8958 if (in_use)
8959 goto done;
8960
8961 /* If this is a completely unrecognized insn, it's an asm.
8962 We don't know how long it is, so record length as -1 to
8963 signal a needed realignment. */
8964 if (recog_memoized (insn) < 0)
8965 len = -1;
8966 else
8967 len = get_attr_length (insn);
8968 goto next_and_done;
8969
8970 case EV4_IBX:
8971 if (in_use & EV4_IB0)
8972 {
8973 if (in_use & EV4_IB1)
8974 goto done;
8975 in_use |= EV4_IB1;
8976 }
8977 else
8978 in_use |= EV4_IB0 | EV4_IBX;
8979 break;
8980
8981 case EV4_IB0:
8982 if (in_use & EV4_IB0)
8983 {
8984 if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
8985 goto done;
8986 in_use |= EV4_IB1;
8987 }
8988 in_use |= EV4_IB0;
8989 break;
8990
8991 case EV4_IB1:
8992 if (in_use & EV4_IB1)
8993 goto done;
8994 in_use |= EV4_IB1;
8995 break;
8996
8997 default:
8998 gcc_unreachable ();
8999 }
9000 len += 4;
9001
9002 /* Haifa doesn't do well scheduling branches. */
9003 if (JUMP_P (insn))
9004 goto next_and_done;
9005
9006 next:
9007 insn = next_nonnote_insn (insn);
9008
9009 if (!insn || ! INSN_P (insn))
9010 goto done;
9011
9012 /* Let Haifa tell us where it thinks insn group boundaries are. */
9013 if (GET_MODE (insn) == TImode)
9014 goto done;
9015
9016 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9017 goto next;
9018 }
9019
9020 next_and_done:
9021 insn = next_nonnote_insn (insn);
9022
9023 done:
9024 *plen = len;
9025 *pin_use = in_use;
9026 return insn;
9027 }
9028
9029 /* IN_USE is a mask of the slots currently filled within the insn group.
9030 The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
9031 the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
9032
9033 LEN is, of course, the length of the group in bytes. */
9034
9035 static rtx
9036 alphaev5_next_group (rtx insn, int *pin_use, int *plen)
9037 {
9038 int len, in_use;
9039
9040 len = in_use = 0;
9041
9042 if (! INSN_P (insn)
9043 || GET_CODE (PATTERN (insn)) == CLOBBER
9044 || GET_CODE (PATTERN (insn)) == USE)
9045 goto next_and_done;
9046
9047 while (1)
9048 {
9049 enum alphaev5_pipe pipe;
9050
9051 pipe = alphaev5_insn_pipe (insn);
9052 switch (pipe)
9053 {
9054 case EV5_STOP:
9055 /* Force complex instructions to start new groups. */
9056 if (in_use)
9057 goto done;
9058
9059 /* If this is a completely unrecognized insn, it's an asm.
9060 We don't know how long it is, so record length as -1 to
9061 signal a needed realignment. */
9062 if (recog_memoized (insn) < 0)
9063 len = -1;
9064 else
9065 len = get_attr_length (insn);
9066 goto next_and_done;
9067
9068 /* ??? Most of the cases below are ones we would like to assert can
9069 never happen, as that would indicate an error either in Haifa, or
9070 in the scheduling description. Unfortunately, Haifa never
9071 schedules the last instruction of the BB, so we don't have
9072 an accurate TI bit to go off. */
9073 case EV5_E01:
9074 if (in_use & EV5_E0)
9075 {
9076 if (in_use & EV5_E1)
9077 goto done;
9078 in_use |= EV5_E1;
9079 }
9080 else
9081 in_use |= EV5_E0 | EV5_E01;
9082 break;
9083
9084 case EV5_E0:
9085 if (in_use & EV5_E0)
9086 {
9087 if (!(in_use & EV5_E01) || (in_use & EV5_E1))
9088 goto done;
9089 in_use |= EV5_E1;
9090 }
9091 in_use |= EV5_E0;
9092 break;
9093
9094 case EV5_E1:
9095 if (in_use & EV5_E1)
9096 goto done;
9097 in_use |= EV5_E1;
9098 break;
9099
9100 case EV5_FAM:
9101 if (in_use & EV5_FA)
9102 {
9103 if (in_use & EV5_FM)
9104 goto done;
9105 in_use |= EV5_FM;
9106 }
9107 else
9108 in_use |= EV5_FA | EV5_FAM;
9109 break;
9110
9111 case EV5_FA:
9112 if (in_use & EV5_FA)
9113 goto done;
9114 in_use |= EV5_FA;
9115 break;
9116
9117 case EV5_FM:
9118 if (in_use & EV5_FM)
9119 goto done;
9120 in_use |= EV5_FM;
9121 break;
9122
9123 case EV5_NONE:
9124 break;
9125
9126 default:
9127 gcc_unreachable ();
9128 }
9129 len += 4;
9130
9131 /* Haifa doesn't do well scheduling branches. */
9132 /* ??? If this is predicted not-taken, slotting continues, except
9133 that no more IBR, FBR, or JSR insns may be slotted. */
9134 if (JUMP_P (insn))
9135 goto next_and_done;
9136
9137 next:
9138 insn = next_nonnote_insn (insn);
9139
9140 if (!insn || ! INSN_P (insn))
9141 goto done;
9142
9143 /* Let Haifa tell us where it thinks insn group boundaries are. */
9144 if (GET_MODE (insn) == TImode)
9145 goto done;
9146
9147 if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
9148 goto next;
9149 }
9150
9151 next_and_done:
9152 insn = next_nonnote_insn (insn);
9153
9154 done:
9155 *plen = len;
9156 *pin_use = in_use;
9157 return insn;
9158 }
9159
9160 static rtx
9161 alphaev4_next_nop (int *pin_use)
9162 {
9163 int in_use = *pin_use;
9164 rtx nop;
9165
9166 if (!(in_use & EV4_IB0))
9167 {
9168 in_use |= EV4_IB0;
9169 nop = gen_nop ();
9170 }
9171 else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
9172 {
9173 in_use |= EV4_IB1;
9174 nop = gen_nop ();
9175 }
9176 else if (TARGET_FP && !(in_use & EV4_IB1))
9177 {
9178 in_use |= EV4_IB1;
9179 nop = gen_fnop ();
9180 }
9181 else
9182 nop = gen_unop ();
9183
9184 *pin_use = in_use;
9185 return nop;
9186 }
9187
9188 static rtx
9189 alphaev5_next_nop (int *pin_use)
9190 {
9191 int in_use = *pin_use;
9192 rtx nop;
9193
9194 if (!(in_use & EV5_E1))
9195 {
9196 in_use |= EV5_E1;
9197 nop = gen_nop ();
9198 }
9199 else if (TARGET_FP && !(in_use & EV5_FA))
9200 {
9201 in_use |= EV5_FA;
9202 nop = gen_fnop ();
9203 }
9204 else if (TARGET_FP && !(in_use & EV5_FM))
9205 {
9206 in_use |= EV5_FM;
9207 nop = gen_fnop ();
9208 }
9209 else
9210 nop = gen_unop ();
9211
9212 *pin_use = in_use;
9213 return nop;
9214 }
9215
9216 /* The instruction group alignment main loop. */
9217
9218 static void
9219 alpha_align_insns (unsigned int max_align,
9220 rtx (*next_group) (rtx, int *, int *),
9221 rtx (*next_nop) (int *))
9222 {
9223 /* ALIGN is the known alignment for the insn group. */
9224 unsigned int align;
9225 /* OFS is the offset of the current insn in the insn group. */
9226 int ofs;
9227 int prev_in_use, in_use, len, ldgp;
9228 rtx i, next;
9229
9230 /* Let shorten_branches take care of assigning alignments to code labels. */
9231 shorten_branches (get_insns ());
9232
9233 if (align_functions < 4)
9234 align = 4;
9235 else if ((unsigned int) align_functions < max_align)
9236 align = align_functions;
9237 else
9238 align = max_align;
9239
9240 ofs = prev_in_use = 0;
9241 i = get_insns ();
9242 if (NOTE_P (i))
9243 i = next_nonnote_insn (i);
9244
9245 ldgp = alpha_function_needs_gp ? 8 : 0;
9246
9247 while (i)
9248 {
9249 next = (*next_group) (i, &in_use, &len);
9250
9251 /* When we see a label, resync alignment etc. */
9252 if (LABEL_P (i))
9253 {
9254 unsigned int new_align = 1 << label_to_alignment (i);
9255
9256 if (new_align >= align)
9257 {
9258 align = new_align < max_align ? new_align : max_align;
9259 ofs = 0;
9260 }
9261
9262 else if (ofs & (new_align-1))
9263 ofs = (ofs | (new_align-1)) + 1;
9264 gcc_assert (!len);
9265 }
9266
9267 /* Handle complex instructions specially. */
9268 else if (in_use == 0)
9269 {
9270 /* Asms will have length < 0. This is a signal that we have
9271 lost alignment knowledge. Assume, however, that the asm
9272 will not mis-align instructions. */
9273 if (len < 0)
9274 {
9275 ofs = 0;
9276 align = 4;
9277 len = 0;
9278 }
9279 }
9280
9281 /* If the known alignment is smaller than the recognized insn group,
9282 realign the output. */
9283 else if ((int) align < len)
9284 {
9285 unsigned int new_log_align = len > 8 ? 4 : 3;
9286 rtx prev, where;
9287
9288 where = prev = prev_nonnote_insn (i);
9289 if (!where || !LABEL_P (where))
9290 where = i;
9291
9292 /* Can't realign between a call and its gp reload. */
9293 if (! (TARGET_EXPLICIT_RELOCS
9294 && prev && CALL_P (prev)))
9295 {
9296 emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
9297 align = 1 << new_log_align;
9298 ofs = 0;
9299 }
9300 }
9301
9302 /* We may not insert padding inside the initial ldgp sequence. */
9303 else if (ldgp > 0)
9304 ldgp -= len;
9305
9306 /* If the group won't fit in the same INT16 as the previous,
9307 we need to add padding to keep the group together. Rather
9308 than simply leaving the insn filling to the assembler, we
9309 can make use of the knowledge of what sorts of instructions
9310 were issued in the previous group to make sure that all of
9311 the added nops are really free. */
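/* For example, with align == 16, ofs == 12 and len == 8, the group would
   straddle the boundary, so (align - ofs) / 4 == 1 nop is inserted and
   the group starts at the next aligned address.  */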
9312 else if (ofs + len > (int) align)
9313 {
9314 int nop_count = (align - ofs) / 4;
9315 rtx where;
9316
9317 /* Insert nops before labels, branches, and calls to truly merge
9318 the execution of the nops with the previous instruction group. */
9319 where = prev_nonnote_insn (i);
9320 if (where)
9321 {
9322 if (LABEL_P (where))
9323 {
9324 rtx where2 = prev_nonnote_insn (where);
9325 if (where2 && JUMP_P (where2))
9326 where = where2;
9327 }
9328 else if (NONJUMP_INSN_P (where))
9329 where = i;
9330 }
9331 else
9332 where = i;
9333
9334 do
9335 emit_insn_before ((*next_nop)(&prev_in_use), where);
9336 while (--nop_count);
9337 ofs = 0;
9338 }
9339
9340 ofs = (ofs + len) & (align - 1);
9341 prev_in_use = in_use;
9342 i = next;
9343 }
9344 }
9345
9346 /* Insert an unop between a noreturn function call and GP load. */
9347
9348 static void
9349 alpha_pad_noreturn (void)
9350 {
9351 rtx insn, next;
9352
9353 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9354 {
9355 if (!CALL_P (insn)
9356 || !find_reg_note (insn, REG_NORETURN, NULL_RTX))
9357 continue;
9358
9359 next = next_active_insn (insn);
9360
9361 if (next)
9362 {
9363 rtx pat = PATTERN (next);
9364
9365 if (GET_CODE (pat) == SET
9366 && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
9367 && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
9368 emit_insn_after (gen_unop (), insn);
9369 }
9370 }
9371 }
9372 \f
9373 /* Machine dependent reorg pass. */
9374
9375 static void
9376 alpha_reorg (void)
9377 {
9378 /* Workaround for a linker error that triggers when an
9379 exception handler immediately follows a noreturn function.
9380
9381 The instruction stream from an object file:
9382
9383 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
9384 58: 00 00 ba 27 ldah gp,0(ra)
9385 5c: 00 00 bd 23 lda gp,0(gp)
9386 60: 00 00 7d a7 ldq t12,0(gp)
9387 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
9388
9389 was converted in the final link pass to:
9390
9391 fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
9392 fdb28: 00 00 fe 2f unop
9393 fdb2c: 00 00 fe 2f unop
9394 fdb30: 30 82 7d a7 ldq t12,-32208(gp)
9395 fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
9396
9397 GP load instructions were wrongly cleared by the linker relaxation
9398 pass. This workaround prevents removal of GP loads by inserting
9399 an unop instruction between a noreturn function call and the
9400 exception handler prologue. */
9401
9402 if (current_function_has_exception_handlers ())
9403 alpha_pad_noreturn ();
9404
9405 if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
9406 alpha_handle_trap_shadows ();
9407
9408 /* Due to the number of extra trapb insns, don't bother fixing up
9409 alignment when trap precision is instruction. Moreover, we can
9410 only do our job when sched2 is run. */
9411 if (optimize && !optimize_size
9412 && alpha_tp != ALPHA_TP_INSN
9413 && flag_schedule_insns_after_reload)
9414 {
9415 if (alpha_tune == PROCESSOR_EV4)
9416 alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
9417 else if (alpha_tune == PROCESSOR_EV5)
9418 alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
9419 }
9420 }
9421 \f
9422 #if !TARGET_ABI_UNICOSMK
9423
9424 #ifdef HAVE_STAMP_H
9425 #include <stamp.h>
9426 #endif
9427
9428 static void
9429 alpha_file_start (void)
9430 {
9431 #ifdef OBJECT_FORMAT_ELF
9432 /* If emitting dwarf2 debug information, we cannot generate a .file
9433 directive to start the file, as it will conflict with dwarf2out
9434 file numbers. So it's only useful when emitting mdebug output. */
9435 targetm.file_start_file_directive = (write_symbols == DBX_DEBUG);
9436 #endif
9437
9438 default_file_start ();
9439 #ifdef MS_STAMP
9440 fprintf (asm_out_file, "\t.verstamp %d %d\n", MS_STAMP, LS_STAMP);
9441 #endif
9442
9443 fputs ("\t.set noreorder\n", asm_out_file);
9444 fputs ("\t.set volatile\n", asm_out_file);
9445 if (!TARGET_ABI_OPEN_VMS)
9446 fputs ("\t.set noat\n", asm_out_file);
9447 if (TARGET_EXPLICIT_RELOCS)
9448 fputs ("\t.set nomacro\n", asm_out_file);
9449 if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
9450 {
9451 const char *arch;
9452
9453 if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
9454 arch = "ev6";
9455 else if (TARGET_MAX)
9456 arch = "pca56";
9457 else if (TARGET_BWX)
9458 arch = "ev56";
9459 else if (alpha_cpu == PROCESSOR_EV5)
9460 arch = "ev5";
9461 else
9462 arch = "ev4";
9463
9464 fprintf (asm_out_file, "\t.arch %s\n", arch);
9465 }
9466 }
9467 #endif
9468
9469 #ifdef OBJECT_FORMAT_ELF
9470 /* Since we don't have a .dynbss section, we should not allow global
9471 relocations in the .rodata section. */
9472
9473 static int
9474 alpha_elf_reloc_rw_mask (void)
9475 {
9476 return flag_pic ? 3 : 2;
9477 }
9478
9479 /* Return a section for X. The only special thing we do here is to
9480 honor small data. */
9481
9482 static section *
9483 alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
9484 unsigned HOST_WIDE_INT align)
9485 {
9486 if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
9487 /* ??? Consider using mergeable sdata sections. */
9488 return sdata_section;
9489 else
9490 return default_elf_select_rtx_section (mode, x, align);
9491 }
9492
9493 static unsigned int
9494 alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
9495 {
9496 unsigned int flags = 0;
9497
9498 if (strcmp (name, ".sdata") == 0
9499 || strncmp (name, ".sdata.", 7) == 0
9500 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9501 || strcmp (name, ".sbss") == 0
9502 || strncmp (name, ".sbss.", 6) == 0
9503 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9504 flags = SECTION_SMALL;
9505
9506 flags |= default_section_type_flags (decl, name, reloc);
9507 return flags;
9508 }
9509 #endif /* OBJECT_FORMAT_ELF */
9510 \f
9511 /* Structure to collect function names for final output in link section. */
9512 /* Note that items marked with GTY can't be ifdef'ed out. */
9513
9514 enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
9515 enum reloc_kind {KIND_LINKAGE, KIND_CODEADDR};
9516
9517 struct GTY(()) alpha_links
9518 {
9519 int num;
9520 rtx linkage;
9521 enum links_kind lkind;
9522 enum reloc_kind rkind;
9523 };
9524
9525 struct GTY(()) alpha_funcs
9526 {
9527 int num;
9528 splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9529 links;
9530 };
9531
9532 static GTY ((param1_is (char *), param2_is (struct alpha_links *)))
9533 splay_tree alpha_links_tree;
9534 static GTY ((param1_is (tree), param2_is (struct alpha_funcs *)))
9535 splay_tree alpha_funcs_tree;
9536
9537 static GTY(()) int alpha_funcs_num;
9538
9539 #if TARGET_ABI_OPEN_VMS
9540
9541 /* Return the VMS argument type corresponding to MODE. */
9542
9543 enum avms_arg_type
9544 alpha_arg_type (enum machine_mode mode)
9545 {
9546 switch (mode)
9547 {
9548 case SFmode:
9549 return TARGET_FLOAT_VAX ? FF : FS;
9550 case DFmode:
9551 return TARGET_FLOAT_VAX ? FD : FT;
9552 default:
9553 return I64;
9554 }
9555 }
9556
9557 /* Return an rtx for an integer representing the VMS Argument Information
9558 register value. */
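/* The low bits hold the argument count; the type of argument I is packed
   into the three bits starting at bit 8 + 3*I, so the six possible
   register arguments occupy bits 8 through 25.  */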
9559
9560 rtx
9561 alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
9562 {
9563 unsigned HOST_WIDE_INT regval = cum.num_args;
9564 int i;
9565
9566 for (i = 0; i < 6; i++)
9567 regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
9568
9569 return GEN_INT (regval);
9570 }
9571 \f
9572 /* Make (or fake) .linkage entry for function call.
9573
9574 IS_LOCAL is 0 if name is used in call, 1 if name is used in definition.
9575
9576 Return a SYMBOL_REF rtx for the linkage. */
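/* For a hypothetical function foo, the symbol constructed below is
   "$foo..lk"; alpha_use_linkage later builds per-caller variants of the
   form "$<N>..foo..lk", where N numbers the calling function.  */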
9577
9578 rtx
9579 alpha_need_linkage (const char *name, int is_local)
9580 {
9581 splay_tree_node node;
9582 struct alpha_links *al;
9583
9584 if (name[0] == '*')
9585 name++;
9586
9587 if (is_local)
9588 {
9589 struct alpha_funcs *cfaf;
9590
9591 if (!alpha_funcs_tree)
9592 alpha_funcs_tree = splay_tree_new_ggc ((splay_tree_compare_fn)
9593 splay_tree_compare_pointers);
9594
9595 cfaf = (struct alpha_funcs *) ggc_alloc (sizeof (struct alpha_funcs));
9596
9597 cfaf->links = 0;
9598 cfaf->num = ++alpha_funcs_num;
9599
9600 splay_tree_insert (alpha_funcs_tree,
9601 (splay_tree_key) current_function_decl,
9602 (splay_tree_value) cfaf);
9603 }
9604
9605 if (alpha_links_tree)
9606 {
9607 /* Is this name already defined? */
9608
9609 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9610 if (node)
9611 {
9612 al = (struct alpha_links *) node->value;
9613 if (is_local)
9614 {
9615 /* Assumed external so far, but defined here. */
9616 if (al->lkind == KIND_EXTERN)
9617 al->lkind = KIND_LOCAL;
9618 }
9619 else
9620 {
9621 /* Assumed unused so far, but used here. */
9622 if (al->lkind == KIND_UNUSED)
9623 al->lkind = KIND_LOCAL;
9624 }
9625 return al->linkage;
9626 }
9627 }
9628 else
9629 alpha_links_tree = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9630
9631 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9632 name = ggc_strdup (name);
9633
9634 /* Assume external if no definition. */
9635 al->lkind = (is_local ? KIND_UNUSED : KIND_EXTERN);
9636
9637 /* Ensure we have an IDENTIFIER so assemble_name can mark it used. */
9638 get_identifier (name);
9639
9640 /* Construct a SYMBOL_REF for us to call. */
9641 {
9642 size_t name_len = strlen (name);
9643 char *linksym = XALLOCAVEC (char, name_len + 6);
9644 linksym[0] = '$';
9645 memcpy (linksym + 1, name, name_len);
9646 memcpy (linksym + 1 + name_len, "..lk", 5);
9647 al->linkage = gen_rtx_SYMBOL_REF (Pmode,
9648 ggc_alloc_string (linksym, name_len + 5));
9649 }
9650
9651 splay_tree_insert (alpha_links_tree, (splay_tree_key) name,
9652 (splay_tree_value) al);
9653
9654 return al->linkage;
9655 }
9656
9657 rtx
9658 alpha_use_linkage (rtx linkage, tree cfundecl, int lflag, int rflag)
9659 {
9660 splay_tree_node cfunnode;
9661 struct alpha_funcs *cfaf;
9662 struct alpha_links *al;
9663 const char *name = XSTR (linkage, 0);
9664
9665 cfaf = (struct alpha_funcs *) 0;
9666 al = (struct alpha_links *) 0;
9667
9668 cfunnode = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) cfundecl);
9669 cfaf = (struct alpha_funcs *) cfunnode->value;
9670
9671 if (cfaf->links)
9672 {
9673 splay_tree_node lnode;
9674
9675 /* Is this name already defined? */
9676
9677 lnode = splay_tree_lookup (cfaf->links, (splay_tree_key) name);
9678 if (lnode)
9679 al = (struct alpha_links *) lnode->value;
9680 }
9681 else
9682 cfaf->links = splay_tree_new_ggc ((splay_tree_compare_fn) strcmp);
9683
9684 if (!al)
9685 {
9686 size_t name_len;
9687 size_t buflen;
9688 char buf [512];
9689 char *linksym;
9690 splay_tree_node node = 0;
9691 struct alpha_links *anl;
9692
9693 if (name[0] == '*')
9694 name++;
9695
9696 name_len = strlen (name);
9697
9698 al = (struct alpha_links *) ggc_alloc (sizeof (struct alpha_links));
9699 al->num = cfaf->num;
9700
9701 node = splay_tree_lookup (alpha_links_tree, (splay_tree_key) name);
9702 if (node)
9703 {
9704 anl = (struct alpha_links *) node->value;
9705 al->lkind = anl->lkind;
9706 }
9707
9708 sprintf (buf, "$%d..%s..lk", cfaf->num, name);
9709 buflen = strlen (buf);
9710 linksym = XALLOCAVEC (char, buflen + 1);
9711 memcpy (linksym, buf, buflen + 1);
9712
9713 al->linkage = gen_rtx_SYMBOL_REF
9714 (Pmode, ggc_alloc_string (linksym, buflen + 1));
9715
9716 splay_tree_insert (cfaf->links, (splay_tree_key) name,
9717 (splay_tree_value) al);
9718 }
9719
9720 if (rflag)
9721 al->rkind = KIND_CODEADDR;
9722 else
9723 al->rkind = KIND_LINKAGE;
9724
9725 if (lflag)
9726 return gen_rtx_MEM (Pmode, plus_constant (al->linkage, 8));
9727 else
9728 return al->linkage;
9729 }
9730
9731 static int
9732 alpha_write_one_linkage (splay_tree_node node, void *data)
9733 {
9734 const char *const name = (const char *) node->key;
9735 struct alpha_links *link = (struct alpha_links *) node->value;
9736 FILE *stream = (FILE *) data;
9737
9738 fprintf (stream, "$%d..%s..lk:\n", link->num, name);
9739 if (link->rkind == KIND_CODEADDR)
9740 {
9741 if (link->lkind == KIND_LOCAL)
9742 {
9743 /* Local and used */
9744 fprintf (stream, "\t.quad %s..en\n", name);
9745 }
9746 else
9747 {
9748 /* External and used, request code address. */
9749 fprintf (stream, "\t.code_address %s\n", name);
9750 }
9751 }
9752 else
9753 {
9754 if (link->lkind == KIND_LOCAL)
9755 {
9756 /* Local and used, build linkage pair. */
9757 fprintf (stream, "\t.quad %s..en\n", name);
9758 fprintf (stream, "\t.quad %s\n", name);
9759 }
9760 else
9761 {
9762 /* External and used, request linkage pair. */
9763 fprintf (stream, "\t.linkage %s\n", name);
9764 }
9765 }
9766
9767 return 0;
9768 }
9769
9770 static void
9771 alpha_write_linkage (FILE *stream, const char *funname, tree fundecl)
9772 {
9773 splay_tree_node node;
9774 struct alpha_funcs *func;
9775
9776 fprintf (stream, "\t.link\n");
9777 fprintf (stream, "\t.align 3\n");
9778 in_section = NULL;
9779
9780 node = splay_tree_lookup (alpha_funcs_tree, (splay_tree_key) fundecl);
9781 func = (struct alpha_funcs *) node->value;
9782
9783 fputs ("\t.name ", stream);
9784 assemble_name (stream, funname);
9785 fputs ("..na\n", stream);
9786 ASM_OUTPUT_LABEL (stream, funname);
9787 fprintf (stream, "\t.pdesc ");
9788 assemble_name (stream, funname);
9789 fprintf (stream, "..en,%s\n",
9790 alpha_procedure_type == PT_STACK ? "stack"
9791 : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
9792
9793 if (func->links)
9794 {
9795 splay_tree_foreach (func->links, alpha_write_one_linkage, stream);
9796 /* splay_tree_delete (func->links); */
9797 }
9798 }
9799
9800 /* Given a decl, a section name, and whether the decl initializer
9801 has relocs, choose attributes for the section. */
9802
9803 #define SECTION_VMS_OVERLAY SECTION_FORGET
9804 #define SECTION_VMS_GLOBAL SECTION_MACH_DEP
9805 #define SECTION_VMS_INITIALIZE (SECTION_VMS_GLOBAL << 1)
9806
9807 static unsigned int
9808 vms_section_type_flags (tree decl, const char *name, int reloc)
9809 {
9810 unsigned int flags = default_section_type_flags (decl, name, reloc);
9811
9812 if (decl && DECL_ATTRIBUTES (decl)
9813 && lookup_attribute ("overlaid", DECL_ATTRIBUTES (decl)))
9814 flags |= SECTION_VMS_OVERLAY;
9815 if (decl && DECL_ATTRIBUTES (decl)
9816 && lookup_attribute ("global", DECL_ATTRIBUTES (decl)))
9817 flags |= SECTION_VMS_GLOBAL;
9818 if (decl && DECL_ATTRIBUTES (decl)
9819 && lookup_attribute ("initialize", DECL_ATTRIBUTES (decl)))
9820 flags |= SECTION_VMS_INITIALIZE;
9821
9822 return flags;
9823 }
9824
9825 /* Switch to an arbitrary section NAME with attributes as specified
9826 by FLAGS. ALIGN specifies any known alignment requirements for
9827 the section; 0 if the default should be used. */
9828
9829 static void
9830 vms_asm_named_section (const char *name, unsigned int flags,
9831 tree decl ATTRIBUTE_UNUSED)
9832 {
9833 fputc ('\n', asm_out_file);
9834 fprintf (asm_out_file, ".section\t%s", name);
9835
9836 if (flags & SECTION_VMS_OVERLAY)
9837 fprintf (asm_out_file, ",OVR");
9838 if (flags & SECTION_VMS_GLOBAL)
9839 fprintf (asm_out_file, ",GBL");
9840 if (flags & SECTION_VMS_INITIALIZE)
9841 fprintf (asm_out_file, ",NOMOD");
9842 if (flags & SECTION_DEBUG)
9843 fprintf (asm_out_file, ",NOWRT");
9844
9845 fputc ('\n', asm_out_file);
9846 }
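/* As an illustration (assuming "overlaid" and "global" are among the
   attributes registered in vms_attribute_table), a declaration along
   the lines of

       int foo __attribute__ ((section ("MYSECT"), overlaid, global));

   would presumably be placed with a directive of roughly the form
   ".section MYSECT,OVR,GBL".  */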
9847
9848 /* Record an element in the table of global constructors. SYMBOL is
9849 a SYMBOL_REF of the function to be called; PRIORITY is a number
9850 between 0 and MAX_INIT_PRIORITY.
9851
9852 Differs from default_ctors_section_asm_out_constructor in that the
9853 width of the .ctors entry is always 64 bits, rather than the 32 bits
9854 used by a normal pointer. */
9855
9856 static void
9857 vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9858 {
9859 switch_to_section (ctors_section);
9860 assemble_align (BITS_PER_WORD);
9861 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9862 }
9863
9864 static void
9865 vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
9866 {
9867 switch_to_section (dtors_section);
9868 assemble_align (BITS_PER_WORD);
9869 assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
9870 }
9871 #else
9872
9873 rtx
9874 alpha_need_linkage (const char *name ATTRIBUTE_UNUSED,
9875 int is_local ATTRIBUTE_UNUSED)
9876 {
9877 return NULL_RTX;
9878 }
9879
9880 rtx
9881 alpha_use_linkage (rtx linkage ATTRIBUTE_UNUSED,
9882 tree cfundecl ATTRIBUTE_UNUSED,
9883 int lflag ATTRIBUTE_UNUSED,
9884 int rflag ATTRIBUTE_UNUSED)
9885 {
9886 return NULL_RTX;
9887 }
9888
9889 #endif /* TARGET_ABI_OPEN_VMS */
9890 \f
9891 #if TARGET_ABI_UNICOSMK
9892
9893 /* This evaluates to true if we do not know how to pass TYPE solely in
9894 registers. This is the case for all arguments that do not fit in two
9895 registers. */
9896
9897 static bool
9898 unicosmk_must_pass_in_stack (enum machine_mode mode, const_tree type)
9899 {
9900 if (type == NULL)
9901 return false;
9902
9903 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
9904 return true;
9905 if (TREE_ADDRESSABLE (type))
9906 return true;
9907
9908 return ALPHA_ARG_SIZE (mode, type, 0) > 2;
9909 }
9910
9911 /* Define the offset between two registers, one to be eliminated, and the
9912 other its replacement, at the start of a routine. */
9913
9914 int
9915 unicosmk_initial_elimination_offset (int from, int to)
9916 {
9917 int fixed_size;
9918
9919 fixed_size = alpha_sa_size();
9920 if (fixed_size != 0)
9921 fixed_size += 48;
9922
9923 if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9924 return -fixed_size;
9925 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
9926 return 0;
9927 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9928 return (ALPHA_ROUND (crtl->outgoing_args_size)
9929 + ALPHA_ROUND (get_frame_size()));
9930 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
9931 return (ALPHA_ROUND (fixed_size)
9932 + ALPHA_ROUND (get_frame_size()
9933 + crtl->outgoing_args_size));
9934 else
9935 gcc_unreachable ();
9936 }
9937
9938 /* Output the module name for .ident and .end directives. We have to strip
9939 directories and make sure that the module name starts with a letter
9940 or '$'. */
9941
9942 static void
9943 unicosmk_output_module_name (FILE *file)
9944 {
9945 const char *name = lbasename (main_input_filename);
9946 unsigned len = strlen (name);
9947 char *clean_name = alloca (len + 2);
9948 char *ptr = clean_name;
9949
9950 /* CAM only accepts module names that start with a letter or '$'. We
9951 prefix the module name with a '$' if necessary. */
9952
9953 if (!ISALPHA (*name))
9954 *ptr++ = '$';
9955 memcpy (ptr, name, len + 1);
9956 clean_symbol_name (clean_name);
9957 fputs (clean_name, file);
9958 }
9959
9960 /* Output the definition of a common variable. */
9961
9962 void
9963 unicosmk_output_common (FILE *file, const char *name, int size, int align)
9964 {
9965 tree name_tree;
9966 printf ("T3E__: common %s\n", name);
9967
9968 in_section = NULL;
9969 fputs("\t.endp\n\n\t.psect ", file);
9970 assemble_name(file, name);
9971 fprintf(file, ",%d,common\n", floor_log2 (align / BITS_PER_UNIT));
9972 fprintf(file, "\t.byte\t0:%d\n", size);
9973
9974 /* Mark the symbol as defined in this module. */
9975 name_tree = get_identifier (name);
9976 TREE_ASM_WRITTEN (name_tree) = 1;
9977 }
9978
9979 #define SECTION_PUBLIC SECTION_MACH_DEP
9980 #define SECTION_MAIN (SECTION_PUBLIC << 1)
9981 static int current_section_align;
9982
9983 /* A get_unnamed_section callback for switching to the text section. */
9984
9985 static void
9986 unicosmk_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9987 {
9988 static int count = 0;
9989 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@text___%d,code\n", count++);
9990 }
9991
9992 /* A get_unnamed_section callback for switching to the data section. */
9993
9994 static void
9995 unicosmk_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9996 {
9997 static int count = 1;
9998 fprintf (asm_out_file, "\t.endp\n\n\t.psect\tgcc@data___%d,data\n", count++);
9999 }
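/* Note that every switch back to the text or data section closes the
   previous psect with .endp and opens a freshly numbered one
   (gcc@text___N or gcc@data___N); see the comment below for why this
   is necessary.  */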
10000
10001 /* Implement TARGET_ASM_INIT_SECTIONS.
10002
10003 The Cray assembler is really weird with respect to sections. It has only
10004 named sections and you can't reopen a section once it has been closed.
10005 This means that we have to generate unique names whenever we want to
10006 reenter the text or the data section. */
10007
10008 static void
10009 unicosmk_init_sections (void)
10010 {
10011 text_section = get_unnamed_section (SECTION_CODE,
10012 unicosmk_output_text_section_asm_op,
10013 NULL);
10014 data_section = get_unnamed_section (SECTION_WRITE,
10015 unicosmk_output_data_section_asm_op,
10016 NULL);
10017 readonly_data_section = data_section;
10018 }
10019
10020 static unsigned int
10021 unicosmk_section_type_flags (tree decl, const char *name,
10022 int reloc ATTRIBUTE_UNUSED)
10023 {
10024 unsigned int flags = default_section_type_flags (decl, name, reloc);
10025
10026 if (!decl)
10027 return flags;
10028
10029 if (TREE_CODE (decl) == FUNCTION_DECL)
10030 {
10031 current_section_align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
10032 if (align_functions_log > current_section_align)
10033 current_section_align = align_functions_log;
10034
10035 if (! strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)), "main"))
10036 flags |= SECTION_MAIN;
10037 }
10038 else
10039 current_section_align = floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT);
10040
10041 if (TREE_PUBLIC (decl))
10042 flags |= SECTION_PUBLIC;
10043
10044 return flags;
10045 }
10046
10047 /* Generate a section name for DECL and associate it with the
10048 declaration. */
10049
10050 static void
10051 unicosmk_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
10052 {
10053 const char *name;
10054 int len;
10055
10056 gcc_assert (decl);
10057
10058 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10059 name = default_strip_name_encoding (name);
10060 len = strlen (name);
10061
10062 if (TREE_CODE (decl) == FUNCTION_DECL)
10063 {
10064 char *string;
10065
10066 /* It is essential that we prefix the section name here because
10067 otherwise the section names generated for constructors and
10068 destructors confuse collect2. */
10069
10070 string = alloca (len + 6);
10071 sprintf (string, "code@%s", name);
10072 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10073 }
10074 else if (TREE_PUBLIC (decl))
10075 DECL_SECTION_NAME (decl) = build_string (len, name);
10076 else
10077 {
10078 char *string;
10079
10080 string = alloca (len + 6);
10081 sprintf (string, "data@%s", name);
10082 DECL_SECTION_NAME (decl) = build_string (len + 5, string);
10083 }
10084 }
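/* For example, a function whose assembler name is "bar" ends up in
   section "code@bar", a non-public variable "baz" in "data@baz", and a
   public variable simply uses its own name as the section name.  */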
10085
10086 /* Switch to an arbitrary section NAME with attributes as specified
10087 by FLAGS. ALIGN specifies any known alignment requirements for
10088 the section; 0 if the default should be used. */
10089
10090 static void
10091 unicosmk_asm_named_section (const char *name, unsigned int flags,
10092 tree decl ATTRIBUTE_UNUSED)
10093 {
10094 const char *kind;
10095
10096 /* Close the previous section. */
10097
10098 fputs ("\t.endp\n\n", asm_out_file);
10099
10100 /* Find out what kind of section we are opening. */
10101
10102 if (flags & SECTION_MAIN)
10103 fputs ("\t.start\tmain\n", asm_out_file);
10104
10105 if (flags & SECTION_CODE)
10106 kind = "code";
10107 else if (flags & SECTION_PUBLIC)
10108 kind = "common";
10109 else
10110 kind = "data";
10111
10112 if (current_section_align != 0)
10113 fprintf (asm_out_file, "\t.psect\t%s,%d,%s\n", name,
10114 current_section_align, kind);
10115 else
10116 fprintf (asm_out_file, "\t.psect\t%s,%s\n", name, kind);
10117 }
10118
10119 static void
10120 unicosmk_insert_attributes (tree decl, tree *attr_ptr ATTRIBUTE_UNUSED)
10121 {
10122 if (DECL_P (decl)
10123 && (TREE_PUBLIC (decl) || TREE_CODE (decl) == FUNCTION_DECL))
10124 unicosmk_unique_section (decl, 0);
10125 }
10126
10127 /* Output an alignment directive. We have to use the macro 'gcc@code@align'
10128 in code sections because .align fills unused space with zeroes. */
10129
10130 void
10131 unicosmk_output_align (FILE *file, int align)
10132 {
10133 if (inside_function)
10134 fprintf (file, "\tgcc@code@align\t%d\n", align);
10135 else
10136 fprintf (file, "\t.align\t%d\n", align);
10137 }
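/* For instance, an alignment request of 3 emits "gcc@code@align 3",
   which the macro defined in unicosmk_file_start expands into enough
   "bis r31,r31,r31" nops to pad the current offset to a multiple of
   8 bytes.  */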
10138
10139 /* Add a case vector to the current function's list of deferred case
10140 vectors. Case vectors have to be put into a separate section because CAM
10141 does not allow data definitions in code sections. */
10142
10143 void
10144 unicosmk_defer_case_vector (rtx lab, rtx vec)
10145 {
10146 struct machine_function *machine = cfun->machine;
10147
10148 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
10149 machine->addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec,
10150 machine->addr_list);
10151 }
10152
10153 /* Output a case vector. */
10154
10155 static void
10156 unicosmk_output_addr_vec (FILE *file, rtx vec)
10157 {
10158 rtx lab = XEXP (vec, 0);
10159 rtx body = XEXP (vec, 1);
10160 int vlen = XVECLEN (body, 0);
10161 int idx;
10162
10163 (*targetm.asm_out.internal_label) (file, "L", CODE_LABEL_NUMBER (lab));
10164
10165 for (idx = 0; idx < vlen; idx++)
10166 {
10167 ASM_OUTPUT_ADDR_VEC_ELT
10168 (file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
10169 }
10170 }
10171
10172 /* Output current function's deferred case vectors. */
10173
10174 static void
10175 unicosmk_output_deferred_case_vectors (FILE *file)
10176 {
10177 struct machine_function *machine = cfun->machine;
10178 rtx t;
10179
10180 if (machine->addr_list == NULL_RTX)
10181 return;
10182
10183 switch_to_section (data_section);
10184 for (t = machine->addr_list; t; t = XEXP (t, 1))
10185 unicosmk_output_addr_vec (file, XEXP (t, 0));
10186 }
10187
10188 /* Generate the name of the SSIB section for the current function. */
10189
10190 #define SSIB_PREFIX "__SSIB_"
10191 #define SSIB_PREFIX_LEN 7
10192
10193 static const char *
10194 unicosmk_ssib_name (void)
10195 {
10196 /* This is ok since CAM won't be able to deal with names longer than that
10197 anyway. */
10198
10199 static char name[256];
10200
10201 rtx x;
10202 const char *fnname;
10203 int len;
10204
10205 x = DECL_RTL (cfun->decl);
10206 gcc_assert (MEM_P (x));
10207 x = XEXP (x, 0);
10208 gcc_assert (GET_CODE (x) == SYMBOL_REF);
10209 fnname = XSTR (x, 0);
10210
10211 len = strlen (fnname);
10212 if (len + SSIB_PREFIX_LEN > 255)
10213 len = 255 - SSIB_PREFIX_LEN;
10214
10215 strcpy (name, SSIB_PREFIX);
10216 strncpy (name + SSIB_PREFIX_LEN, fnname, len);
10217 name[len + SSIB_PREFIX_LEN] = 0;
10218
10219 return name;
10220 }
10221
10222 /* Set up the dynamic subprogram information block (DSIB) and update the
10223 frame pointer register ($15) for subroutines which have a frame. If the
10224 subroutine doesn't have a frame, simply increment $15. */
10225
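/* The DSIB slots written below, relative to the decremented stack
   pointer, are: the return address at offset 56, the old frame pointer
   at 48, the SSIB pointer at 32 and the CIW index (register $25) at 24.
   The remaining slots of the 64-byte block are not touched here.  */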
10226 static void
10227 unicosmk_gen_dsib (unsigned long *imaskP)
10228 {
10229 if (alpha_procedure_type == PT_STACK)
10230 {
10231 const char *ssib_name;
10232 rtx mem;
10233
10234 /* Allocate 64 bytes for the DSIB. */
10235
10236 FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
10237 GEN_INT (-64))));
10238 emit_insn (gen_blockage ());
10239
10240 /* Save the return address. */
10241
10242 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 56));
10243 set_mem_alias_set (mem, alpha_sr_alias_set);
10244 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, REG_RA)));
10245 (*imaskP) &= ~(1UL << REG_RA);
10246
10247 /* Save the old frame pointer. */
10248
10249 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 48));
10250 set_mem_alias_set (mem, alpha_sr_alias_set);
10251 FRP (emit_move_insn (mem, hard_frame_pointer_rtx));
10252 (*imaskP) &= ~(1UL << HARD_FRAME_POINTER_REGNUM);
10253
10254 emit_insn (gen_blockage ());
10255
10256 /* Store the SSIB pointer. */
10257
10258 ssib_name = ggc_strdup (unicosmk_ssib_name ());
10259 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 32));
10260 set_mem_alias_set (mem, alpha_sr_alias_set);
10261
10262 FRP (emit_move_insn (gen_rtx_REG (DImode, 5),
10263 gen_rtx_SYMBOL_REF (Pmode, ssib_name)));
10264 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 5)));
10265
10266 /* Save the CIW index. */
10267
10268 mem = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx, 24));
10269 set_mem_alias_set (mem, alpha_sr_alias_set);
10270 FRP (emit_move_insn (mem, gen_rtx_REG (DImode, 25)));
10271
10272 emit_insn (gen_blockage ());
10273
10274 /* Set the new frame pointer. */
10275 FRP (emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10276 stack_pointer_rtx, GEN_INT (64))));
10277 }
10278 else
10279 {
10280 /* Increment the frame pointer register to indicate that we do not
10281 have a frame. */
10282 emit_insn (gen_adddi3 (hard_frame_pointer_rtx,
10283 hard_frame_pointer_rtx, const1_rtx));
10284 }
10285 }
10286
10287 /* Output the static subroutine information block for the current
10288 function. */
10289
10290 static void
10291 unicosmk_output_ssib (FILE *file, const char *fnname)
10292 {
10293 int len;
10294 int i;
10295 rtx x;
10296 rtx ciw;
10297 struct machine_function *machine = cfun->machine;
10298
10299 in_section = NULL;
10300 fprintf (file, "\t.endp\n\n\t.psect\t%s%s,data\n", user_label_prefix,
10301 unicosmk_ssib_name ());
10302
10303 /* Some required stuff and the function name length. */
10304
10305 len = strlen (fnname);
10306 fprintf (file, "\t.quad\t^X20008%2.2X28\n", len);
10307
10308 /* Saved registers
10309 ??? We don't do that yet. */
10310
10311 fputs ("\t.quad\t0\n", file);
10312
10313 /* Function address. */
10314
10315 fputs ("\t.quad\t", file);
10316 assemble_name (file, fnname);
10317 putc ('\n', file);
10318
10319 fputs ("\t.quad\t0\n", file);
10320 fputs ("\t.quad\t0\n", file);
10321
10322 /* Function name.
10323 ??? We do it the same way Cray CC does it but this could be
10324 simplified. */
10325
10326 for (i = 0; i < len; i++)
10327 fprintf (file, "\t.byte\t%d\n", (int)(fnname[i]));
10328 if ((len % 8) == 0)
10329 fputs ("\t.quad\t0\n", file);
10330 else
10331 fprintf (file, "\t.bits\t%d : 0\n", (8 - (len % 8))*8);
10332
10333 /* All call information words used in the function. */
10334
10335 for (x = machine->first_ciw; x; x = XEXP (x, 1))
10336 {
10337 ciw = XEXP (x, 0);
10338 #if HOST_BITS_PER_WIDE_INT == 32
10339 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_DOUBLE_HEX "\n",
10340 CONST_DOUBLE_HIGH (ciw), CONST_DOUBLE_LOW (ciw));
10341 #else
10342 fprintf (file, "\t.quad\t" HOST_WIDE_INT_PRINT_HEX "\n", INTVAL (ciw));
10343 #endif
10344 }
10345 }
10346
10347 /* Add a call information word (CIW) to the list of the current function's
10348 CIWs and return its index.
10349
10350 X is a CONST_INT or CONST_DOUBLE representing the CIW. */
10351
10352 rtx
10353 unicosmk_add_call_info_word (rtx x)
10354 {
10355 rtx node;
10356 struct machine_function *machine = cfun->machine;
10357
10358 node = gen_rtx_EXPR_LIST (VOIDmode, x, NULL_RTX);
10359 if (machine->first_ciw == NULL_RTX)
10360 machine->first_ciw = node;
10361 else
10362 XEXP (machine->last_ciw, 1) = node;
10363
10364 machine->last_ciw = node;
10365 ++machine->ciw_count;
10366
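/* The value returned appears to be the quadword offset of this CIW
   within the SSIB laid out by unicosmk_output_ssib: five fixed
   quadwords, the function name padded to a multiple of eight bytes,
   then the CIWs in order.  */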
10367 return GEN_INT (machine->ciw_count
10368 + strlen (current_function_name ())/8 + 5);
10369 }
10370
10371 /* The Cray assembler doesn't accept extern declarations for symbols which
10372 are defined in the same file. We have to keep track of all global
10373 symbols which are referenced and/or defined in a source file and, at
10374 the end of the file, output extern declarations for those which are
10375 referenced but not defined. */
10376
10377 /* List of identifiers for which an extern declaration might have to be
10378 emitted. */
10379 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10380
10381 struct unicosmk_extern_list
10382 {
10383 struct unicosmk_extern_list *next;
10384 const char *name;
10385 };
10386
10387 static struct unicosmk_extern_list *unicosmk_extern_head = 0;
10388
10389 /* Output extern declarations which are required for every asm file. */
10390
10391 static void
10392 unicosmk_output_default_externs (FILE *file)
10393 {
10394 static const char *const externs[] =
10395 { "__T3E_MISMATCH" };
10396
10397 int i;
10398 int n;
10399
10400 n = ARRAY_SIZE (externs);
10401
10402 for (i = 0; i < n; i++)
10403 fprintf (file, "\t.extern\t%s\n", externs[i]);
10404 }
10405
10406 /* Output extern declarations for global symbols which have been
10407 referenced but not defined. */
10408
10409 static void
10410 unicosmk_output_externs (FILE *file)
10411 {
10412 struct unicosmk_extern_list *p;
10413 const char *real_name;
10414 int len;
10415 tree name_tree;
10416
10417 len = strlen (user_label_prefix);
10418 for (p = unicosmk_extern_head; p != 0; p = p->next)
10419 {
10420 /* We have to strip the encoding and possibly remove user_label_prefix
10421 from the identifier in order to handle -fleading-underscore and
10422 explicit asm names correctly (cf. gcc.dg/asm-names-1.c). */
10423 real_name = default_strip_name_encoding (p->name);
10424 if (len && p->name[0] == '*'
10425 && !memcmp (real_name, user_label_prefix, len))
10426 real_name += len;
10427
10428 name_tree = get_identifier (real_name);
10429 if (! TREE_ASM_WRITTEN (name_tree))
10430 {
10431 TREE_ASM_WRITTEN (name_tree) = 1;
10432 fputs ("\t.extern\t", file);
10433 assemble_name (file, p->name);
10434 putc ('\n', file);
10435 }
10436 }
10437 }
10438
10439 /* Record an extern. */
10440
10441 void
10442 unicosmk_add_extern (const char *name)
10443 {
10444 struct unicosmk_extern_list *p;
10445
10446 p = (struct unicosmk_extern_list *)
10447 xmalloc (sizeof (struct unicosmk_extern_list));
10448 p->next = unicosmk_extern_head;
10449 p->name = name;
10450 unicosmk_extern_head = p;
10451 }
10452
10453 /* The Cray assembler generates incorrect code if identifiers which
10454 conflict with register names are used as instruction operands. We have
10455 to replace such identifiers with DEX expressions. */
10456
10457 /* Structure to collect identifiers which have been replaced by DEX
10458 expressions. */
10459 /* FIXME: needs to use GC, so it can be saved and restored for PCH. */
10460
10461 struct unicosmk_dex {
10462 struct unicosmk_dex *next;
10463 const char *name;
10464 };
10465
10466 /* List of identifiers which have been replaced by DEX expressions. The DEX
10467 number is determined by the position in the list. */
10468
10469 static struct unicosmk_dex *unicosmk_dex_list = NULL;
10470
10471 /* The number of elements in the DEX list. */
10472
10473 static int unicosmk_dex_count = 0;
10474
10475 /* Check if NAME must be replaced by a DEX expression. */
10476
10477 static int
10478 unicosmk_special_name (const char *name)
10479 {
10480 if (name[0] == '*')
10481 ++name;
10482
10483 if (name[0] == '$')
10484 ++name;
10485
10486 if (name[0] != 'r' && name[0] != 'f' && name[0] != 'R' && name[0] != 'F')
10487 return 0;
10488
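/* NAME starts with r, R, f or F at this point; treat it as special
   only if the remainder is a decimal register number in the range
   0 to 31.  */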
10489 switch (name[1])
10490 {
10491 case '1': case '2':
10492 return (name[2] == '\0' || (ISDIGIT (name[2]) && name[3] == '\0'));
10493
10494 case '3':
10495 return (name[2] == '\0'
10496 || ((name[2] == '0' || name[2] == '1') && name[3] == '\0'));
10497
10498 default:
10499 return (ISDIGIT (name[1]) && name[2] == '\0');
10500 }
10501 }
10502
10503 /* Return the DEX number if X must be replaced by a DEX expression and 0
10504 otherwise. */
10505
10506 static int
10507 unicosmk_need_dex (rtx x)
10508 {
10509 struct unicosmk_dex *dex;
10510 const char *name;
10511 int i;
10512
10513 if (GET_CODE (x) != SYMBOL_REF)
10514 return 0;
10515
10516 name = XSTR (x,0);
10517 if (! unicosmk_special_name (name))
10518 return 0;
10519
10520 i = unicosmk_dex_count;
10521 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10522 {
10523 if (! strcmp (name, dex->name))
10524 return i;
10525 --i;
10526 }
10527
10528 dex = (struct unicosmk_dex *) xmalloc (sizeof (struct unicosmk_dex));
10529 dex->name = name;
10530 dex->next = unicosmk_dex_list;
10531 unicosmk_dex_list = dex;
10532
10533 ++unicosmk_dex_count;
10534 return unicosmk_dex_count;
10535 }
10536
10537 /* Output the DEX definitions for this file. */
10538
10539 static void
10540 unicosmk_output_dex (FILE *file)
10541 {
10542 struct unicosmk_dex *dex;
10543 int i;
10544
10545 if (unicosmk_dex_list == NULL)
10546 return;
10547
10548 fprintf (file, "\t.dexstart\n");
10549
10550 i = unicosmk_dex_count;
10551 for (dex = unicosmk_dex_list; dex; dex = dex->next)
10552 {
10553 fprintf (file, "\tDEX (%d) = ", i);
10554 assemble_name (file, dex->name);
10555 putc ('\n', file);
10556 --i;
10557 }
10558
10559 fprintf (file, "\t.dexend\n");
10560 }
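/* With two recorded identifiers, say "r3" registered first and "f10"
   second, the emitted block would look something like:

       .dexstart
       DEX (2) = f10
       DEX (1) = r3
       .dexend  */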
10561
10562 /* Output text to appear at the beginning of an assembler file. */
10563
10564 static void
10565 unicosmk_file_start (void)
10566 {
10567 int i;
10568
10569 fputs ("\t.ident\t", asm_out_file);
10570 unicosmk_output_module_name (asm_out_file);
10571 fputs ("\n\n", asm_out_file);
10572
10573 /* The Unicos/Mk assembler uses different register names. Instead of trying
10574 to support them, we simply use micro definitions. */
10575
10576 /* CAM has different register names: rN for the integer register N and fN
10577 for the floating-point register N. Instead of trying to use these in
10578 alpha.md, we define the symbols $N and $fN to refer to the appropriate
10579 register. */
10580
10581 for (i = 0; i < 32; ++i)
10582 fprintf (asm_out_file, "$%d <- r%d\n", i, i);
10583
10584 for (i = 0; i < 32; ++i)
10585 fprintf (asm_out_file, "$f%d <- f%d\n", i, i);
10586
10587 putc ('\n', asm_out_file);
10588
10589 /* The .align directive fills unused space with zeroes, which does not work
10590 in code sections. We define the macro 'gcc@code@align' which uses nops
10591 instead. Note that it assumes that code sections always have the
10592 biggest possible alignment since . refers to the current offset from
10593 the beginning of the section. */
10594
10595 fputs ("\t.macro gcc@code@align n\n", asm_out_file);
10596 fputs ("gcc@n@bytes = 1 << n\n", asm_out_file);
10597 fputs ("gcc@here = . % gcc@n@bytes\n", asm_out_file);
10598 fputs ("\t.if ne, gcc@here, 0\n", asm_out_file);
10599 fputs ("\t.repeat (gcc@n@bytes - gcc@here) / 4\n", asm_out_file);
10600 fputs ("\tbis r31,r31,r31\n", asm_out_file);
10601 fputs ("\t.endr\n", asm_out_file);
10602 fputs ("\t.endif\n", asm_out_file);
10603 fputs ("\t.endm gcc@code@align\n\n", asm_out_file);
10604
10605 /* Output extern declarations which should always be visible. */
10606 unicosmk_output_default_externs (asm_out_file);
10607
10608 /* Open a dummy section. We always need to be inside a section for the
10609 section-switching code to work correctly.
10610 ??? This should be a module id or something like that. I still have to
10611 figure out what the rules for those are. */
10612 fputs ("\n\t.psect\t$SG00000,data\n", asm_out_file);
10613 }
10614
10615 /* Output text to appear at the end of an assembler file. This includes all
10616 pending extern declarations and DEX expressions. */
10617
10618 static void
10619 unicosmk_file_end (void)
10620 {
10621 fputs ("\t.endp\n\n", asm_out_file);
10622
10623 /* Output all pending externs. */
10624
10625 unicosmk_output_externs (asm_out_file);
10626
10627 /* Output dex definitions used for functions whose names conflict with
10628 register names. */
10629
10630 unicosmk_output_dex (asm_out_file);
10631
10632 fputs ("\t.end\t", asm_out_file);
10633 unicosmk_output_module_name (asm_out_file);
10634 putc ('\n', asm_out_file);
10635 }
10636
10637 #else
10638
10639 static void
10640 unicosmk_output_deferred_case_vectors (FILE *file ATTRIBUTE_UNUSED)
10641 {}
10642
10643 static void
10644 unicosmk_gen_dsib (unsigned long *imaskP ATTRIBUTE_UNUSED)
10645 {}
10646
10647 static void
10648 unicosmk_output_ssib (FILE * file ATTRIBUTE_UNUSED,
10649 const char * fnname ATTRIBUTE_UNUSED)
10650 {}
10651
10652 rtx
10653 unicosmk_add_call_info_word (rtx x ATTRIBUTE_UNUSED)
10654 {
10655 return NULL_RTX;
10656 }
10657
10658 static int
10659 unicosmk_need_dex (rtx x ATTRIBUTE_UNUSED)
10660 {
10661 return 0;
10662 }
10663
10664 #endif /* TARGET_ABI_UNICOSMK */
10665
10666 static void
10667 alpha_init_libfuncs (void)
10668 {
10669 if (TARGET_ABI_UNICOSMK)
10670 {
10671 /* Prevent gcc from generating calls to __divsi3. */
10672 set_optab_libfunc (sdiv_optab, SImode, 0);
10673 set_optab_libfunc (udiv_optab, SImode, 0);
10674
10675 /* Use the functions provided by the system library
10676 for DImode integer division. */
10677 set_optab_libfunc (sdiv_optab, DImode, "$sldiv");
10678 set_optab_libfunc (udiv_optab, DImode, "$uldiv");
10679 }
10680 else if (TARGET_ABI_OPEN_VMS)
10681 {
10682 /* Use the VMS runtime library functions for division and
10683 remainder. */
10684 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10685 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10686 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10687 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10688 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10689 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10690 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10691 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10692 }
10693 }
10694
10695 \f
10696 /* Initialize the GCC target structure. */
10697 #if TARGET_ABI_OPEN_VMS
10698 # undef TARGET_ATTRIBUTE_TABLE
10699 # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
10700 # undef TARGET_SECTION_TYPE_FLAGS
10701 # define TARGET_SECTION_TYPE_FLAGS vms_section_type_flags
10702 #endif
10703
10704 #undef TARGET_IN_SMALL_DATA_P
10705 #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
10706
10707 #if TARGET_ABI_UNICOSMK
10708 # undef TARGET_INSERT_ATTRIBUTES
10709 # define TARGET_INSERT_ATTRIBUTES unicosmk_insert_attributes
10710 # undef TARGET_SECTION_TYPE_FLAGS
10711 # define TARGET_SECTION_TYPE_FLAGS unicosmk_section_type_flags
10712 # undef TARGET_ASM_UNIQUE_SECTION
10713 # define TARGET_ASM_UNIQUE_SECTION unicosmk_unique_section
10714 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
10715 #define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
10716 # undef TARGET_ASM_GLOBALIZE_LABEL
10717 # define TARGET_ASM_GLOBALIZE_LABEL hook_void_FILEptr_constcharptr
10718 # undef TARGET_MUST_PASS_IN_STACK
10719 # define TARGET_MUST_PASS_IN_STACK unicosmk_must_pass_in_stack
10720 #endif
10721
10722 #undef TARGET_ASM_ALIGNED_HI_OP
10723 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
10724 #undef TARGET_ASM_ALIGNED_DI_OP
10725 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
10726
10727 /* Default unaligned ops are provided for ELF systems. To get unaligned
10728 data for non-ELF systems, we have to turn off auto alignment. */
10729 #ifndef OBJECT_FORMAT_ELF
10730 #undef TARGET_ASM_UNALIGNED_HI_OP
10731 #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
10732 #undef TARGET_ASM_UNALIGNED_SI_OP
10733 #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
10734 #undef TARGET_ASM_UNALIGNED_DI_OP
10735 #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
10736 #endif
10737
10738 #ifdef OBJECT_FORMAT_ELF
10739 #undef TARGET_ASM_RELOC_RW_MASK
10740 #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
10741 #undef TARGET_ASM_SELECT_RTX_SECTION
10742 #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
10743 #undef TARGET_SECTION_TYPE_FLAGS
10744 #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
10745 #endif
10746
10747 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
10748 #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
10749
10750 #undef TARGET_INIT_LIBFUNCS
10751 #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
10752
10753 #undef TARGET_LEGITIMIZE_ADDRESS
10754 #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
10755
10756 #if TARGET_ABI_UNICOSMK
10757 #undef TARGET_ASM_FILE_START
10758 #define TARGET_ASM_FILE_START unicosmk_file_start
10759 #undef TARGET_ASM_FILE_END
10760 #define TARGET_ASM_FILE_END unicosmk_file_end
10761 #else
10762 #undef TARGET_ASM_FILE_START
10763 #define TARGET_ASM_FILE_START alpha_file_start
10764 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
10765 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
10766 #endif
10767
10768 #undef TARGET_SCHED_ADJUST_COST
10769 #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
10770 #undef TARGET_SCHED_ISSUE_RATE
10771 #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
10772 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
10773 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
10774 alpha_multipass_dfa_lookahead
10775
10776 #undef TARGET_HAVE_TLS
10777 #define TARGET_HAVE_TLS HAVE_AS_TLS
10778
10779 #undef TARGET_INIT_BUILTINS
10780 #define TARGET_INIT_BUILTINS alpha_init_builtins
10781 #undef TARGET_EXPAND_BUILTIN
10782 #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
10783 #undef TARGET_FOLD_BUILTIN
10784 #define TARGET_FOLD_BUILTIN alpha_fold_builtin
10785
10786 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
10787 #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
10788 #undef TARGET_CANNOT_COPY_INSN_P
10789 #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
10790 #undef TARGET_CANNOT_FORCE_CONST_MEM
10791 #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
10792
10793 #if TARGET_ABI_OSF
10794 #undef TARGET_ASM_OUTPUT_MI_THUNK
10795 #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
10796 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
10797 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
10798 #undef TARGET_STDARG_OPTIMIZE_HOOK
10799 #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
10800 #endif
10801
10802 #undef TARGET_RTX_COSTS
10803 #define TARGET_RTX_COSTS alpha_rtx_costs
10804 #undef TARGET_ADDRESS_COST
10805 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
10806
10807 #undef TARGET_MACHINE_DEPENDENT_REORG
10808 #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
10809
10810 #undef TARGET_PROMOTE_FUNCTION_ARGS
10811 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
10812 #undef TARGET_PROMOTE_FUNCTION_RETURN
10813 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
10814 #undef TARGET_PROMOTE_PROTOTYPES
10815 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
10816 #undef TARGET_RETURN_IN_MEMORY
10817 #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
10818 #undef TARGET_PASS_BY_REFERENCE
10819 #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
10820 #undef TARGET_SETUP_INCOMING_VARARGS
10821 #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
10822 #undef TARGET_STRICT_ARGUMENT_NAMING
10823 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
10824 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
10825 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
10826 #undef TARGET_SPLIT_COMPLEX_ARG
10827 #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
10828 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
10829 #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
10830 #undef TARGET_ARG_PARTIAL_BYTES
10831 #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
10832
10833 #undef TARGET_SECONDARY_RELOAD
10834 #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
10835
10836 #undef TARGET_SCALAR_MODE_SUPPORTED_P
10837 #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
10838 #undef TARGET_VECTOR_MODE_SUPPORTED_P
10839 #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
10840
10841 #undef TARGET_BUILD_BUILTIN_VA_LIST
10842 #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
10843
10844 #undef TARGET_EXPAND_BUILTIN_VA_START
10845 #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
10846
10847 /* The Alpha architecture does not require sequential consistency. See
10848 http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
10849 for an example of how it can be violated in practice. */
10850 #undef TARGET_RELAXED_ORDERING
10851 #define TARGET_RELAXED_ORDERING true
10852
10853 #undef TARGET_DEFAULT_TARGET_FLAGS
10854 #define TARGET_DEFAULT_TARGET_FLAGS \
10855 (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS)
10856 #undef TARGET_HANDLE_OPTION
10857 #define TARGET_HANDLE_OPTION alpha_handle_option
10858
10859 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
10860 #undef TARGET_MANGLE_TYPE
10861 #define TARGET_MANGLE_TYPE alpha_mangle_type
10862 #endif
10863
10864 #undef TARGET_LEGITIMATE_ADDRESS_P
10865 #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
10866
10867 struct gcc_target targetm = TARGET_INITIALIZER;
10868
10869 \f
10870 #include "gt-alpha.h"