/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
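
/* The tables above are selected at the bottom of sparc_override_options
   and consulted through this pointer, e.g. by the rtx-cost and scheduling
   hooks.  For instance, an fdivs is costed at COSTS_N_INSNS (37) on
   cypress but only COSTS_N_INSNS (17) on ultrasparc3, i.e. 37 resp. 17
   times the cost of a canonical single instruction.  */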

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   anything branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
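
/* An illustrative reading of the table above: in a leaf function the
   incoming registers %i0-%i5 (hard regs 24-29) are remapped onto the
   output registers %o0-%o5 (hard regs 8-13), so leaf_reg_remap[24] == 8;
   %sp (14) maps to itself, %i7 (31) becomes %o7 (15), and every register
   marked -1 must not appear at all.  */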

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_regs (void);
static void emit_restore_regs (void);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, tree, bool);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Code model option as passed by user.  */
const char *sparc_cmodel_string;
/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch name, tune arch */
  { (char *)0, "default", 1, 1 },
  { (char *)0, "-mcpu=", 1, 1 },
  { (char *)0, "-mtune=", 1, 0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;
\f
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
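
/* Each #undef/#define pair above replaces a default hook from
   target-def.h before TARGET_INITIALIZER is expanded, so the initializer
   for targetm picks up the SPARC implementations.  A sketch of the
   effect: a middle-end call such as targetm.asm_out.integer (x, size,
   aligned_p) now lands in sparc_assemble_integer, declared earlier in
   this file.  */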
\f
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  if (! def->name)
    abort ();
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  Clear MASK_FPU_SET to avoid confusing
     the reverse mapping from switch values to names.  */
  if (TARGET_FPU_SET)
    {
      target_flags = (target_flags & ~MASK_FPU) | fpu;
      target_flags &= ~MASK_FPU_SET;
    }

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire a unique set number for our register saves and restores.  */
  sparc_sr_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    };
}
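
/* A worked example of the above: -mcpu=supersparc matches the cpu_table
   entry { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 }, so the
   old ISA bits are cleared, MASK_V8 is set, sparc_cpu becomes
   PROCESSOR_SUPERSPARC, and the switch statement above then selects
   supersparc_costs.  */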
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
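
/* These six codes are the ones the V9 branch-on-register-contents and
   conditional-move-on-register instructions can encode, i.e.:

     EQ -> brz/movrz      NE -> brnz/movrnz    GE -> brgez/movrgez
     LT -> brlz/movrlz    LE -> brlez/movrlez  GT -> brgz/movrgz  */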

\f
/* Operand constraints.  */

/* Return nonzero only if OP is a register of mode MODE,
   or const0_rtx.  */

int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;
  if (op == const0_rtx)
    return 1;
  if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
      && CONST_DOUBLE_HIGH (op) == 0
      && CONST_DOUBLE_LOW (op) == 0)
    return 1;
  if (fp_zero_operand (op, mode))
    return 1;
  return 0;
}

/* Return nonzero only if OP is const1_rtx.  */

int
const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return op == const1_rtx;
}

/* Nonzero if OP is a floating point value with value 0.0.  */

int
fp_zero_operand (rtx op, enum machine_mode mode)
{
  if (GET_MODE_CLASS (GET_MODE (op)) != MODE_FLOAT)
    return 0;
  return op == CONST0_RTX (mode);
}

/* Nonzero if OP is a register operand in a floating point register.  */

int
fp_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && SPARC_FP_REG_P (REGNO (op));
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0) &&
          ! REAL_VALUE_MINUS_ZERO (r))
        return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SETHI_P (i))
        return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0) &&
          ! REAL_VALUE_MINUS_ZERO (r))
        return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SIMM13_P (i))
        return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0) &&
          ! REAL_VALUE_MINUS_ZERO (r))
        return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (! SPARC_SETHI_P (i)
          && ! SPARC_SIMM13_P (i))
        return 1;
    }

  return 0;
}
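
/* A worked example for the three predicates above, using the SFmode
   constant 1.0f whose image is 0x3f800000: its low 10 bits are zero, so
   SPARC_SETHI_P holds and fp_sethi_p accepts it; it does not fit a
   signed 13-bit immediate, so fp_mov_p rejects it; and fp_high_losum_p
   rejects it as well, since a single sethi already suffices.  */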

/* Nonzero if OP is an integer register.  */

int
intreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (register_operand (op, SImode)
          || (TARGET_ARCH64 && register_operand (op, DImode)));
}

/* Nonzero if OP is a floating point condition code register.  */

int
fcc_reg_operand (rtx op, enum machine_mode mode)
{
  /* This can happen when recog is called from combine.  Op may be a MEM.
     Fail instead of calling abort in this case.  */
  if (GET_CODE (op) != REG)
    return 0;

  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (mode == VOIDmode
      && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
    return 0;

#if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first.  See gen_compare_reg().  */
  if (reg_renumber == 0)
    return REGNO (op) >= FIRST_PSEUDO_REGISTER;
  return REGNO_OK_FOR_CCFP_P (REGNO (op));
#else
  return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
#endif
}

/* Nonzero if OP is a floating point condition code fcc0 register.  */

int
fcc0_reg_operand (rtx op, enum machine_mode mode)
{
  /* This can happen when recog is called from combine.  Op may be a MEM.
     Fail instead of calling abort in this case.  */
  if (GET_CODE (op) != REG)
    return 0;

  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (mode == VOIDmode
      && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
    return 0;

  return REGNO (op) == SPARC_FCC_REG;
}

/* Nonzero if OP is an integer or floating point condition code register.  */

int
icc_or_fcc_reg_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
    {
      if (mode != VOIDmode && mode != GET_MODE (op))
        return 0;
      if (mode == VOIDmode
          && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
        return 0;
      return 1;
    }

  return fcc_reg_operand (op, mode);
}

/* A call insn on SPARC can take a PC-relative constant address, or any
   regular memory address.  */

int
call_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != MEM)
    abort ();
  op = XEXP (op, 0);
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

int
call_operand_address (rtx op, enum machine_mode mode)
{
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
   otherwise return 0.  */

int
tls_symbolic_operand (rtx op)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}

int
tgd_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_GLOBAL_DYNAMIC;
}

int
tld_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_DYNAMIC;
}

int
tie_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_INITIAL_EXEC;
}

int
tle_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_EXEC;
}
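
/* An illustrative sketch of how these predicates line up with the usual
   ELF TLS models at the source level (the exact choice also depends on
   -fpic and symbol visibility):

     __thread int a;          exported, PIC   -> global dynamic (tgd_...)
     static __thread int b;   local, PIC      -> local dynamic  (tld_...)
     extern __thread int c;   executable      -> initial exec   (tie_...)
     static __thread int d;   local, non-PIC  -> local exec     (tle_...)  */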

/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  */

int
symbolic_operand (register rtx op, enum machine_mode mode)
{
  enum machine_mode omode = GET_MODE (op);

  if (omode != mode && omode != VOIDmode && mode != VOIDmode)
    return 0;

  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_TLS_MODEL (op);

    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      return (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
                && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
               || GET_CODE (XEXP (op, 0)) == LABEL_REF)
              && GET_CODE (XEXP (op, 1)) == CONST_INT);

    default:
      return 0;
    }
}
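
/* For example, symbolic_operand accepts (symbol_ref "foo"),
   (label_ref ...) and (const (plus (symbol_ref "foo") (const_int 8))),
   but rejects TLS symbols and anything else, e.g. a bare CONST_INT.  */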

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return ((GET_CODE (op) == SYMBOL_REF && !SYMBOL_REF_TLS_MODEL (op))
          || GET_CODE (op) == CONST || GET_CODE (op) == HIGH
          || GET_CODE (op) == LABEL_REF);
}

/* Return truth value of statement that OP is a LABEL_REF of mode MODE.  */

int
label_ref_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != LABEL_REF)
    return 0;
  if (GET_MODE (op) != mode)
    return 0;
  return 1;
}

/* Return 1 if the operand is an argument used in generating pic references
   in either the medium/low or medium/anywhere code models of sparc64.  */

int
sp64_medium_pic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* Check for (const (minus (symbol_ref:GOT)
                             (const (minus (label) (pc))))).  */
  if (GET_CODE (op) != CONST)
    return 0;
  op = XEXP (op, 0);
  if (GET_CODE (op) != MINUS)
    return 0;
  if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
    return 0;
  /* ??? Ensure symbol is GOT.  */
  if (GET_CODE (XEXP (op, 1)) != CONST)
    return 0;
  if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
    return 0;
  return 1;
}

/* Return 1 if the operand is a data segment reference.  This includes
   the readonly data segment, or in other words anything but the text segment.
   This is needed in the medium/anywhere code model on v9.  These values
   are accessed with EMBMEDANY_BASE_REG.  */

int
data_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF :
      return ! SYMBOL_REF_FUNCTION_P (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
         Fall through.  */
    case CONST :
      return data_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is a text segment reference.
   This is needed in the medium/anywhere code model on v9.  */

int
text_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case LABEL_REF :
      return 1;
    case SYMBOL_REF :
      return SYMBOL_REF_FUNCTION_P (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
         Fall through.  */
    case CONST :
      return text_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is either a register or a memory operand that is
   not symbolic.  */

int
reg_or_nonsymb_mem_operand (register rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

int
splittable_symbolic_memory_operand (rtx op,
                                    enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! symbolic_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

int
splittable_immediate_memory_operand (rtx op,
                                     enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! immediate_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

/* Return truth value of whether OP is EQ or NE.  */

int
eq_or_neq (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}

/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
   or LTU for non-floating-point.  We handle those specially.  */

int
normal_comp_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  if (GET_MODE (XEXP (op, 0)) == CCFPmode
      || GET_MODE (XEXP (op, 0)) == CCFPEmode)
    return 1;

  code = GET_CODE (op);
  return (code != NE && code != EQ && code != GEU && code != LTU);
}

/* Return 1 if this is a comparison operator.  This allows the use of
   MATCH_OPERATOR to recognize all the branch insns.  */

int
noov_compare_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode
      || GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
    /* These are the only branches which work with CC_NOOVmode.  */
    return (code == EQ || code == NE || code == GE || code == LT);
  return 1;
}

/* Return 1 if this is a 64-bit comparison operator.  This allows the use of
   MATCH_OPERATOR to recognize all the branch insns.  */

int
noov_compare64_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (! TARGET_V9)
    return 0;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  if (GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
    /* These are the only branches which work with CCX_NOOVmode.  */
    return (code == EQ || code == NE || code == GE || code == LT);
  return (GET_MODE (XEXP (op, 0)) == CCXmode);
}

/* Nonzero if OP is a comparison operator suitable for use in v9
   conditional move or branch on register contents instructions.  */

int
v9_regcmp_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  return v9_regcmp_p (code);
}

/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation.  */

int
extend_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
}

/* Return nonzero if OP is an operator of mode MODE which can set
   the condition codes explicitly.  We do not include PLUS and MINUS
   because these require CC_NOOVmode, which we handle explicitly.  */

int
cc_arithop (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == AND
      || GET_CODE (op) == IOR
      || GET_CODE (op) == XOR)
    return 1;

  return 0;
}

/* Return nonzero if OP is an operator of mode MODE which can bitwise
   complement its second operand and set the condition codes explicitly.  */

int
cc_arithopn (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* XOR is not here because combine canonicalizes (xor (not ...) ...)
     and (xor ... (not ...)) to (not (xor ...)).  */
  return (GET_CODE (op) == AND
          || GET_CODE (op) == IOR);
}
\f
/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 13 bit immediate field.  This is an acceptable SImode operand for
   most 3 address instructions.  */

int
arith_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  return SMALL_INT32 (op);
}
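
/* The signed 13-bit field covers [-4096, 4095]: (const_int 4095) is a
   valid arith_operand, while (const_int 4096) is not and must be loaded
   into a register first (or handled by the 4096 special case below).  */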

/* Return true if OP is a constant 4096.  */

int
arith_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != CONST_INT)
    return 0;
  else
    return INTVAL (op) == 4096;
}

/* Return true if OP is suitable as the second operand for add/sub.  */

int
arith_add_operand (rtx op, enum machine_mode mode)
{
  return arith_operand (op, mode) || arith_4096_operand (op, mode);
}
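
/* 4096 gets its own predicate, presumably so the add/sub patterns can
   still use an immediate for it: "add %x, 4096, %y" is out of simm13
   range, but the equivalent "sub %x, -4096, %y" is in range.  */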

/* Return true if OP is a CONST_INT or a CONST_DOUBLE which can fit in the
   immediate field of OR and XOR instructions.  Used for 64-bit
   constant formation patterns.  */
int
const64_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT
           && SPARC_SIMM13_P (INTVAL (op)))
#if HOST_BITS_PER_WIDE_INT != 64
          || (GET_CODE (op) == CONST_DOUBLE
              && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
              && (CONST_DOUBLE_HIGH (op) ==
                  ((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ?
                   (HOST_WIDE_INT)-1 : 0)))
#endif
          );
}

/* The same, but only for sethi instructions.  */
int
const64_high_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT
           && (INTVAL (op) & ~(HOST_WIDE_INT)0x3ff) != 0
           && SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
           )
          || (GET_CODE (op) == CONST_DOUBLE
              && CONST_DOUBLE_HIGH (op) == 0
              && (CONST_DOUBLE_LOW (op) & ~(HOST_WIDE_INT)0x3ff) != 0
              && SPARC_SETHI_P (CONST_DOUBLE_LOW (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 11 bit immediate field.  This is an acceptable SImode operand for
   the movcc instructions.  */

int
arith11_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 10 bit immediate field.  This is an acceptable SImode operand for
   the movrcc instructions.  */

int
arith10_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
}

/* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
   immediate field, or is a CONST_DOUBLE whose both parts fit in a 13 bit
   immediate field.
   ARCH64: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in a 13 bit immediate field.  This is an acceptable DImode operand
   for most 3 address instructions.  */

int
arith_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
          || (! TARGET_ARCH64
              && GET_CODE (op) == CONST_DOUBLE
              && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
              && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
          || (TARGET_ARCH64
              && GET_CODE (op) == CONST_DOUBLE
              && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
              && ((CONST_DOUBLE_HIGH (op) == -1
                   && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
                  || (CONST_DOUBLE_HIGH (op) == 0
                      && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}

/* Return true if OP is a constant 4096 for DImode on ARCH64.  */

int
arith_double_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 &&
          ((GET_CODE (op) == CONST_INT && INTVAL (op) == 4096) ||
           (GET_CODE (op) == CONST_DOUBLE &&
            CONST_DOUBLE_LOW (op) == 4096 &&
            CONST_DOUBLE_HIGH (op) == 0)));
}

/* Return true if OP is suitable as the second operand for add/sub in DImode.  */

int
arith_double_add_operand (rtx op, enum machine_mode mode)
{
  return arith_double_operand (op, mode) || arith_double_4096_operand (op, mode);
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in an 11 bit immediate field.  This is an acceptable DImode
   operand for the movcc instructions.  */
/* ??? Replace with arith11_operand?  */

int
arith11_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_DOUBLE
              && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
              && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
              && ((CONST_DOUBLE_HIGH (op) == -1
                   && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
                  || (CONST_DOUBLE_HIGH (op) == 0
                      && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
          || (GET_CODE (op) == CONST_INT
              && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
              && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in a 10 bit immediate field.  This is an acceptable DImode
   operand for the movrcc instructions.  */
/* ??? Replace with arith10_operand?  */

int
arith10_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_DOUBLE
              && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
              && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
              && ((CONST_DOUBLE_HIGH (op) == -1
                   && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
                  || (CONST_DOUBLE_HIGH (op) == 0
                      && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
          || (GET_CODE (op) == CONST_INT
              && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
              && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in most three-address insns,
   which have a 13 bit immediate field.  */

int
small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
}

int
small_int_or_double (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
          || (GET_CODE (op) == CONST_DOUBLE
              && CONST_DOUBLE_HIGH (op) == 0
              && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))));
}

/* Recognize operand values for the umul instruction.  That instruction sign
   extends immediate values just like all other sparc instructions, but
   interprets the extended result as an unsigned number.  */

int
uns_small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
          && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
              || (INTVAL (op) >= 0xFFFFF000
                  && INTVAL (op) <= 0xFFFFFFFF)));
#else
  return ((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
          || (GET_CODE (op) == CONST_DOUBLE
              && CONST_DOUBLE_HIGH (op) == 0
              && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000));
#endif
}
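
/* Worked example: the simm13 value -4096 sign-extends to 0xFFFFF000,
   which umul then treats as the unsigned number 4294963200.  Hence the
   acceptable constants are exactly [0, 0xFFF] together with
   [0xFFFFF000, 0xFFFFFFFF].  */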

int
uns_arith_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || uns_small_int (op, mode);
}

/* Return truth value of statement that OP is a call-clobbered register.  */
int
clobbered_register (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
}

/* Return 1 if OP is a valid operand for the source of a move insn.  */

int
input_operand (rtx op, enum machine_mode mode)
{
  /* If both modes are non-void they must be the same.  */
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  /* Allow any one instruction integer constant, and all CONST_INT
     variants when we are working in DImode and !arch64.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ((GET_CODE (op) == CONST_INT
           && (SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
               || SPARC_SIMM13_P (INTVAL (op))
               || (mode == DImode
                   && ! TARGET_ARCH64)))
          || (TARGET_ARCH64
              && GET_CODE (op) == CONST_DOUBLE
              && ((CONST_DOUBLE_HIGH (op) == 0
                   && SPARC_SETHI_P (CONST_DOUBLE_LOW (op)))
                  ||
#if HOST_BITS_PER_WIDE_INT == 64
                  (CONST_DOUBLE_HIGH (op) == 0
                   && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)))
#else
                  (SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
                   && (((CONST_DOUBLE_LOW (op) & 0x80000000) == 0
                        && CONST_DOUBLE_HIGH (op) == 0)
                       || (CONST_DOUBLE_HIGH (op) == -1
                           && CONST_DOUBLE_LOW (op) & 0x80000000) != 0))
#endif
                  ))))
    return 1;

  /* If !arch64 and this is a DImode const, allow it so that
     the splits can be generated.  */
  if (! TARGET_ARCH64
      && mode == DImode
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  if (register_operand (op, mode))
    return 1;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  /* If this is a SUBREG, look inside so that we handle
     paradoxical ones.  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Check for valid MEM forms.  */
  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));

  return 0;
}

/* Return 1 if OP is valid for the lhs of a compare insn.  */

int
compare_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == ZERO_EXTRACT)
    return (register_operand (XEXP (op, 0), mode)
            && small_int_or_double (XEXP (op, 1), mode)
            && small_int_or_double (XEXP (op, 2), mode)
            /* This matches cmp_zero_extract.  */
            && ((mode == SImode
                 && ((GET_CODE (XEXP (op, 2)) == CONST_INT
                      && INTVAL (XEXP (op, 2)) > 19)
                     || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
                         && CONST_DOUBLE_LOW (XEXP (op, 2)) > 19)))
                /* This matches cmp_zero_extract_sp64.  */
                || (mode == DImode
                    && TARGET_ARCH64
                    && ((GET_CODE (XEXP (op, 2)) == CONST_INT
                         && INTVAL (XEXP (op, 2)) > 51)
                        || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
                            && CONST_DOUBLE_LOW (XEXP (op, 2)) > 51)))));
  else
    return register_operand (op, mode);
}

\f
/* We know it can't be done in one insn when we get here;
   the movsi expander guarantees this.  */
void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (op1);

      if (SPARC_SETHI_P (value & GET_MODE_MASK (mode))
          || SPARC_SIMM13_P (value))
        abort ();
    }

  /* Full 2-insn decomposition is needed.  */
  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      if (TARGET_ARCH64
          && HOST_BITS_PER_WIDE_INT != 64
          && (INTVAL (op1) & 0x80000000) != 0)
        emit_insn (gen_rtx_SET
                   (VOIDmode, temp,
                    immed_double_const (INTVAL (op1) & ~(HOST_WIDE_INT)0x3ff,
                                        0, DImode)));
      else
        emit_insn (gen_rtx_SET (VOIDmode, temp,
                                GEN_INT (INTVAL (op1)
                                         & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));

    }
}
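
/* A sketch of the decomposition above for op1 == (const_int 0x12345678):

     (set (reg temp) (const_int 0x12345400))              ! assembles to sethi
     (set (reg op0) (ior (reg temp) (const_int 0x278)))   ! assembles to or

   since 0x12345678 & ~0x3ff == 0x12345400 and 0x12345678 & 0x3ff == 0x278.  */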
1639
1640 \f
1641 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1642 If TEMP is nonzero, we are forbidden to use any other scratch
1643 registers. Otherwise, we are allowed to generate them as needed.
1644
1645 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1646 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1647 void
1648 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1649 {
1650 rtx temp1, temp2, temp3, temp4, temp5;
1651 rtx ti_temp = 0;
1652
1653 if (temp && GET_MODE (temp) == TImode)
1654 {
1655 ti_temp = temp;
1656 temp = gen_rtx_REG (DImode, REGNO (temp));
1657 }
1658
1659 /* SPARC-V9 code-model support. */
1660 switch (sparc_cmodel)
1661 {
1662 case CM_MEDLOW:
1663 /* The range spanned by all instructions in the object is less
1664 than 2^31 bytes (2GB) and the distance from any instruction
1665 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1666 than 2^31 bytes (2GB).
1667
1668 The executable must be in the low 4TB of the virtual address
1669 space.
1670
1671 sethi %hi(symbol), %temp1
1672 or %temp1, %lo(symbol), %reg */
1673 if (temp)
1674 temp1 = temp; /* op0 is allowed. */
1675 else
1676 temp1 = gen_reg_rtx (DImode);
1677
1678 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1679 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1680 break;
1681
1682 case CM_MEDMID:
1683 /* The range spanned by all instructions in the object is less
1684 than 2^31 bytes (2GB) and the distance from any instruction
1685 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1686 than 2^31 bytes (2GB).
1687
1688 The executable must be in the low 16TB of the virtual address
1689 space.
1690
1691 sethi %h44(symbol), %temp1
1692 or %temp1, %m44(symbol), %temp2
1693 sllx %temp2, 12, %temp3
1694 or %temp3, %l44(symbol), %reg */
1695 if (temp)
1696 {
1697 temp1 = op0;
1698 temp2 = op0;
1699 temp3 = temp; /* op0 is allowed. */
1700 }
1701 else
1702 {
1703 temp1 = gen_reg_rtx (DImode);
1704 temp2 = gen_reg_rtx (DImode);
1705 temp3 = gen_reg_rtx (DImode);
1706 }
1707
1708 emit_insn (gen_seth44 (temp1, op1));
1709 emit_insn (gen_setm44 (temp2, temp1, op1));
1710 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1711 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1712 emit_insn (gen_setl44 (op0, temp3, op1));
1713 break;
1714
1715 case CM_MEDANY:
1716 /* The range spanned by all instructions in the object is less
1717 than 2^31 bytes (2GB) and the distance from any instruction
1718 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1719 than 2^31 bytes (2GB).
1720
1721 The executable can be placed anywhere in the virtual address
1722 space.
1723
1724 sethi %hh(symbol), %temp1
1725 sethi %lm(symbol), %temp2
1726 or %temp1, %hm(symbol), %temp3
1727 sllx %temp3, 32, %temp4
1728 or %temp4, %temp2, %temp5
1729 or %temp5, %lo(symbol), %reg */
1730 if (temp)
1731 {
1732 /* It is possible that one of the registers we got for operands[2]
1733 might coincide with that of operands[0] (which is why we made
1734 it TImode). Pick the other one to use as our scratch. */
1735 if (rtx_equal_p (temp, op0))
1736 {
1737 if (ti_temp)
1738 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1739 else
1740 abort();
1741 }
1742 temp1 = op0;
1743 temp2 = temp; /* op0 is _not_ allowed, see above. */
1744 temp3 = op0;
1745 temp4 = op0;
1746 temp5 = op0;
1747 }
1748 else
1749 {
1750 temp1 = gen_reg_rtx (DImode);
1751 temp2 = gen_reg_rtx (DImode);
1752 temp3 = gen_reg_rtx (DImode);
1753 temp4 = gen_reg_rtx (DImode);
1754 temp5 = gen_reg_rtx (DImode);
1755 }
1756
1757 emit_insn (gen_sethh (temp1, op1));
1758 emit_insn (gen_setlm (temp2, op1));
1759 emit_insn (gen_sethm (temp3, temp1, op1));
1760 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1761 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1762 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1763 gen_rtx_PLUS (DImode, temp4, temp2)));
1764 emit_insn (gen_setlo (op0, temp5, op1));
1765 break;
1766
1767 case CM_EMBMEDANY:
1768 /* Old old old backwards compatibility kruft here.
1769 Essentially it is MEDLOW with a fixed 64-bit
1770 virtual base added to all data segment addresses.
1771 Text-segment stuff is computed like MEDANY, we can't
1772 reuse the code above because the relocation knobs
1773 look different.
1774
1775 Data segment: sethi %hi(symbol), %temp1
1776 add %temp1, EMBMEDANY_BASE_REG, %temp2
1777 or %temp2, %lo(symbol), %reg */
1778 if (data_segment_operand (op1, GET_MODE (op1)))
1779 {
1780 if (temp)
1781 {
1782 temp1 = temp; /* op0 is allowed. */
1783 temp2 = op0;
1784 }
1785 else
1786 {
1787 temp1 = gen_reg_rtx (DImode);
1788 temp2 = gen_reg_rtx (DImode);
1789 }
1790
1791 emit_insn (gen_embmedany_sethi (temp1, op1));
1792 emit_insn (gen_embmedany_brsum (temp2, temp1));
1793 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1794 }
1795
1796 /* Text segment: sethi %uhi(symbol), %temp1
1797 sethi %hi(symbol), %temp2
1798 or %temp1, %ulo(symbol), %temp3
1799 sllx %temp3, 32, %temp4
1800 or %temp4, %temp2, %temp5
1801 or %temp5, %lo(symbol), %reg */
1802 else
1803 {
1804 if (temp)
1805 {
1806 /* It is possible that one of the registers we got for operands[2]
1807 might coincide with that of operands[0] (which is why we made
1808 it TImode). Pick the other one to use as our scratch. */
1809 if (rtx_equal_p (temp, op0))
1810 {
1811 if (ti_temp)
1812 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1813 else
1814 abort();
1815 }
1816 temp1 = op0;
1817 temp2 = temp; /* op0 is _not_ allowed, see above. */
1818 temp3 = op0;
1819 temp4 = op0;
1820 temp5 = op0;
1821 }
1822 else
1823 {
1824 temp1 = gen_reg_rtx (DImode);
1825 temp2 = gen_reg_rtx (DImode);
1826 temp3 = gen_reg_rtx (DImode);
1827 temp4 = gen_reg_rtx (DImode);
1828 temp5 = gen_reg_rtx (DImode);
1829 }
1830
1831 emit_insn (gen_embmedany_textuhi (temp1, op1));
1832 emit_insn (gen_embmedany_texthi (temp2, op1));
1833 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1834 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1835 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1836 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1837 gen_rtx_PLUS (DImode, temp4, temp2)));
1838 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1839 }
1840 break;
1841
1842 default:
1843 abort();
1844 }
1845 }
1846
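/* Illustrative sketch (editorial addition, not GCC code): how the
   CM_MEDMID %h44/%m44/%l44 fields used above recombine into a 44-bit
   address.  Standalone host C; the sample symbol value is arbitrary.  */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t symbol = 0x0000012345678abcULL;          /* any address below 2^44 */
  uint64_t h44 = (symbol >> 22) & 0x3fffff;         /* sethi %h44: bits 43..22 */
  uint64_t m44 = (symbol >> 12) & 0x3ff;            /* or %m44: bits 21..12 */
  uint64_t l44 = symbol & 0xfff;                    /* or %l44: bits 11..0 */
  uint64_t reg = (((h44 << 10) | m44) << 12) | l44; /* sethi; or; sllx 12; or */
  assert (reg == symbol);
  printf ("%016llx\n", (unsigned long long) reg);
  return 0;
}
#endif
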
1847 /* These avoid problems when cross compiling. If we do not
1848 go through all this hair then the optimizer will see
1849 invalid REG_EQUAL notes or in some cases none at all. */
1850 static void sparc_emit_set_safe_HIGH64 (rtx, HOST_WIDE_INT);
1851 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1852 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1853 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1854
1855 #if HOST_BITS_PER_WIDE_INT == 64
1856 #define GEN_HIGHINT64(__x) GEN_INT ((__x) & ~(HOST_WIDE_INT)0x3ff)
1857 #define GEN_INT64(__x) GEN_INT (__x)
1858 #else
1859 #define GEN_HIGHINT64(__x) \
1860 immed_double_const ((__x) & ~(HOST_WIDE_INT)0x3ff, 0, DImode)
1861 #define GEN_INT64(__x) \
1862 immed_double_const ((__x) & 0xffffffff, \
1863 ((__x) & 0x80000000 ? -1 : 0), DImode)
1864 #endif
1865
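/* Quick host-side check (editorial sketch): GEN_HIGHINT64 keeps bits
   63..10 and clears the low 10 bits that a following 'or %lo' supplies,
   so the HIGH part plus the low 10 bits round-trips any value.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t x = 0x1234567890abcdefLL;         /* arbitrary sample */
  int64_t high = x & ~(int64_t) 0x3ff;      /* what GEN_HIGHINT64 masks */
  assert ((high | (x & 0x3ff)) == x);
  return 0;
}
#endif
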
1866 /* The optimizer is not allowed to assume anything about exactly
1867 which bits are set for a HIGH; they are unspecified.
1868 Unfortunately this leads to many missed optimizations
1869 during CSE. We mask out the non-HIGH bits, and match
1870 a plain movdi, to alleviate this problem. */
1871 static void
1872 sparc_emit_set_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1873 {
1874 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_HIGHINT64 (val)));
1875 }
1876
1877 static rtx
1878 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1879 {
1880 return gen_rtx_SET (VOIDmode, dest, GEN_INT64 (val));
1881 }
1882
1883 static rtx
1884 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1885 {
1886 return gen_rtx_IOR (DImode, src, GEN_INT64 (val));
1887 }
1888
1889 static rtx
1890 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1891 {
1892 return gen_rtx_XOR (DImode, src, GEN_INT64 (val));
1893 }
1894
1895 /* Worker routines for 64-bit constant formation on arch64.
1896 One of the key things to do in these emissions is to create
1897 as many temp REGs as possible. This makes half-built
1898 constants available for reuse when a similar value is
1899 required later on.
1900 Without doing this, the optimizer cannot see such
1901 opportunities. */
1902
1903 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1904 unsigned HOST_WIDE_INT, int);
1905
1906 static void
1907 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1908 unsigned HOST_WIDE_INT low_bits, int is_neg)
1909 {
1910 unsigned HOST_WIDE_INT high_bits;
1911
1912 if (is_neg)
1913 high_bits = (~low_bits) & 0xffffffff;
1914 else
1915 high_bits = low_bits;
1916
1917 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1918 if (!is_neg)
1919 {
1920 emit_insn (gen_rtx_SET (VOIDmode, op0,
1921 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1922 }
1923 else
1924 {
1925 /* If we are XOR'ing with -1, then we should emit a one's complement
1926 instead. This way the combiner will notice logical operations
1927 such as ANDN later on and substitute. */
1928 if ((low_bits & 0x3ff) == 0x3ff)
1929 {
1930 emit_insn (gen_rtx_SET (VOIDmode, op0,
1931 gen_rtx_NOT (DImode, temp)));
1932 }
1933 else
1934 {
1935 emit_insn (gen_rtx_SET (VOIDmode, op0,
1936 gen_safe_XOR64 (temp,
1937 (-(HOST_WIDE_INT)0x400
1938 | (low_bits & 0x3ff)))));
1939 }
1940 }
1941 }
1942
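/* Worked check (editorial sketch) of the negative case above: for a
   value sign-extended from 32 bits, sethi %hi(~v) followed by xor with
   (-0x400 | (v & 0x3ff)) reproduces v.  The sample value is arbitrary.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t low_bits = 0xdeadbeef;                        /* negative as int32 */
  uint64_t want = (uint64_t)(int64_t)(int32_t) low_bits; /* sign-extended */
  uint64_t temp = (~low_bits & 0xffffffffULL) & ~(uint64_t) 0x3ff; /* sethi */
  int64_t simm13 = -(int64_t) 0x400 | (low_bits & 0x3ff); /* xor immediate */
  assert ((temp ^ (uint64_t) simm13) == want);
  return 0;
}
#endif
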
1943 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1944 unsigned HOST_WIDE_INT, int);
1945
1946 static void
1947 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1948 unsigned HOST_WIDE_INT high_bits,
1949 unsigned HOST_WIDE_INT low_immediate,
1950 int shift_count)
1951 {
1952 rtx temp2 = op0;
1953
1954 if ((high_bits & 0xfffffc00) != 0)
1955 {
1956 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1957 if ((high_bits & ~0xfffffc00) != 0)
1958 emit_insn (gen_rtx_SET (VOIDmode, op0,
1959 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1960 else
1961 temp2 = temp;
1962 }
1963 else
1964 {
1965 emit_insn (gen_safe_SET64 (temp, high_bits));
1966 temp2 = temp;
1967 }
1968
1969 /* Now shift it up into place. */
1970 emit_insn (gen_rtx_SET (VOIDmode, op0,
1971 gen_rtx_ASHIFT (DImode, temp2,
1972 GEN_INT (shift_count))));
1973
1974 /* If there is a low immediate piece, finish up by
1975 putting that in as well. */
1976 if (low_immediate != 0)
1977 emit_insn (gen_rtx_SET (VOIDmode, op0,
1978 gen_safe_OR64 (op0, low_immediate)));
1979 }
1980
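/* Host-C restatement (editorial sketch) of the quick2 sequence above:
   build high_bits with sethi/or, shift it into place, then OR in the
   low immediate.  Sample values are arbitrary.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t high_bits = 0x12345678;
  uint32_t low_immediate = 0x1ff;               /* must fit the final or */
  uint64_t t = (uint64_t)(high_bits & ~0x3ffu) | (high_bits & 0x3ff);
  uint64_t reg = (t << 32) | low_immediate;     /* sllx 32; or */
  assert (reg == 0x12345678000001ffULL);
  return 0;
}
#endif
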
1981 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1982 unsigned HOST_WIDE_INT);
1983
1984 /* Full 64-bit constant decomposition. Even though this is the
1985 'worst' case, we still optimize a few things away. */
1986 static void
1987 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1988 unsigned HOST_WIDE_INT high_bits,
1989 unsigned HOST_WIDE_INT low_bits)
1990 {
1991 rtx sub_temp;
1992
1993 if (reload_in_progress || reload_completed)
1994 sub_temp = op0;
1995 else
1996 sub_temp = gen_reg_rtx (DImode);
1997
1998 if ((high_bits & 0xfffffc00) != 0)
1999 {
2000 sparc_emit_set_safe_HIGH64 (temp, high_bits);
2001 if ((high_bits & ~0xfffffc00) != 0)
2002 emit_insn (gen_rtx_SET (VOIDmode,
2003 sub_temp,
2004 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2005 else
2006 sub_temp = temp;
2007 }
2008 else
2009 {
2010 emit_insn (gen_safe_SET64 (temp, high_bits));
2011 sub_temp = temp;
2012 }
2013
2014 if (!reload_in_progress && !reload_completed)
2015 {
2016 rtx temp2 = gen_reg_rtx (DImode);
2017 rtx temp3 = gen_reg_rtx (DImode);
2018 rtx temp4 = gen_reg_rtx (DImode);
2019
2020 emit_insn (gen_rtx_SET (VOIDmode, temp4,
2021 gen_rtx_ASHIFT (DImode, sub_temp,
2022 GEN_INT (32))));
2023
2024 sparc_emit_set_safe_HIGH64 (temp2, low_bits);
2025 if ((low_bits & ~0xfffffc00) != 0)
2026 {
2027 emit_insn (gen_rtx_SET (VOIDmode, temp3,
2028 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2029 emit_insn (gen_rtx_SET (VOIDmode, op0,
2030 gen_rtx_PLUS (DImode, temp4, temp3)));
2031 }
2032 else
2033 {
2034 emit_insn (gen_rtx_SET (VOIDmode, op0,
2035 gen_rtx_PLUS (DImode, temp4, temp2)));
2036 }
2037 }
2038 else
2039 {
2040 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2041 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2042 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2043 int to_shift = 12;
2044
2045 /* We are in the middle of reload, so this is really
2046 painful. However we do still make an attempt to
2047 avoid emitting truly stupid code. */
2048 if (low1 != const0_rtx)
2049 {
2050 emit_insn (gen_rtx_SET (VOIDmode, op0,
2051 gen_rtx_ASHIFT (DImode, sub_temp,
2052 GEN_INT (to_shift))));
2053 emit_insn (gen_rtx_SET (VOIDmode, op0,
2054 gen_rtx_IOR (DImode, op0, low1)));
2055 sub_temp = op0;
2056 to_shift = 12;
2057 }
2058 else
2059 {
2060 to_shift += 12;
2061 }
2062 if (low2 != const0_rtx)
2063 {
2064 emit_insn (gen_rtx_SET (VOIDmode, op0,
2065 gen_rtx_ASHIFT (DImode, sub_temp,
2066 GEN_INT (to_shift))));
2067 emit_insn (gen_rtx_SET (VOIDmode, op0,
2068 gen_rtx_IOR (DImode, op0, low2)));
2069 sub_temp = op0;
2070 to_shift = 8;
2071 }
2072 else
2073 {
2074 to_shift += 8;
2075 }
2076 emit_insn (gen_rtx_SET (VOIDmode, op0,
2077 gen_rtx_ASHIFT (DImode, sub_temp,
2078 GEN_INT (to_shift))));
2079 if (low3 != const0_rtx)
2080 emit_insn (gen_rtx_SET (VOIDmode, op0,
2081 gen_rtx_IOR (DImode, op0, low3)));
2082 /* phew... */
2083 }
2084 }
2085
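/* Sketch (editorial) of the reload-time path above: low_bits enters in
   chunks of 12, 12 and 8 bits with interleaved shifts, matching the
   low1/low2/low3 masks.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t low_bits = 0xdeadbeef;                   /* arbitrary sample */
  uint64_t acc = 0;                                 /* stands in for sub_temp */
  acc = (acc << 12) | ((low_bits >> 20) & 0xfff);   /* low1 */
  acc = (acc << 12) | ((low_bits >> 8) & 0xfff);    /* low2 */
  acc = (acc << 8) | (low_bits & 0xff);             /* low3 */
  assert (acc == low_bits);
  return 0;
}
#endif
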
2086 /* Analyze a 64-bit constant for certain properties. */
2087 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2088 unsigned HOST_WIDE_INT,
2089 int *, int *, int *);
2090
2091 static void
2092 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2093 unsigned HOST_WIDE_INT low_bits,
2094 int *hbsp, int *lbsp, int *abbasp)
2095 {
2096 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2097 int i;
2098
2099 lowest_bit_set = highest_bit_set = -1;
2100 i = 0;
2101 do
2102 {
2103 if ((lowest_bit_set == -1)
2104 && ((low_bits >> i) & 1))
2105 lowest_bit_set = i;
2106 if ((highest_bit_set == -1)
2107 && ((high_bits >> (32 - i - 1)) & 1))
2108 highest_bit_set = (64 - i - 1);
2109 }
2110 while (++i < 32
2111 && ((highest_bit_set == -1)
2112 || (lowest_bit_set == -1)));
2113 if (i == 32)
2114 {
2115 i = 0;
2116 do
2117 {
2118 if ((lowest_bit_set == -1)
2119 && ((high_bits >> i) & 1))
2120 lowest_bit_set = i + 32;
2121 if ((highest_bit_set == -1)
2122 && ((low_bits >> (32 - i - 1)) & 1))
2123 highest_bit_set = 32 - i - 1;
2124 }
2125 while (++i < 32
2126 && ((highest_bit_set == -1)
2127 || (lowest_bit_set == -1)));
2128 }
2129 /* If there are no bits set, this should have gone out
2130 as one instruction! */
2131 if (lowest_bit_set == -1
2132 || highest_bit_set == -1)
2133 abort ();
2134 all_bits_between_are_set = 1;
2135 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2136 {
2137 if (i < 32)
2138 {
2139 if ((low_bits & (1 << i)) != 0)
2140 continue;
2141 }
2142 else
2143 {
2144 if ((high_bits & (1 << (i - 32))) != 0)
2145 continue;
2146 }
2147 all_bits_between_are_set = 0;
2148 break;
2149 }
2150 *hbsp = highest_bit_set;
2151 *lbsp = lowest_bit_set;
2152 *abbasp = all_bits_between_are_set;
2153 }
2154
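/* Cross-check (editorial sketch): the routine above restated with
   native 64-bit arithmetic.  HBSP/LBSP receive the highest and lowest
   set bit positions, ABBASP is 1 iff the run between them is solid.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
analyze64 (uint64_t v, int *hbsp, int *lbsp, int *abbasp)
{
  int lo = 0, hi = 63;
  uint64_t mask;

  assert (v != 0);                      /* mirrors the abort () above */
  while (!((v >> lo) & 1))
    lo++;
  while (!((v >> hi) & 1))
    hi--;
  /* Mask of bits lo..hi; the run is solid iff it equals v.  */
  mask = (hi == 63 ? ~(uint64_t) 0 : ((uint64_t) 1 << (hi + 1)) - 1)
	 & ~(((uint64_t) 1 << lo) - 1);
  *hbsp = hi, *lbsp = lo, *abbasp = (v == mask);
}

int
main (void)
{
  int h, l, s;
  analyze64 (0x00000ffff0000000ULL, &h, &l, &s);
  assert (h == 43 && l == 28 && s == 1);
  return 0;
}
#endif
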
2155 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2156
2157 static int
2158 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2159 unsigned HOST_WIDE_INT low_bits)
2160 {
2161 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2162
2163 if (high_bits == 0
2164 || high_bits == 0xffffffff)
2165 return 1;
2166
2167 analyze_64bit_constant (high_bits, low_bits,
2168 &highest_bit_set, &lowest_bit_set,
2169 &all_bits_between_are_set);
2170
2171 if ((highest_bit_set == 63
2172 || lowest_bit_set == 0)
2173 && all_bits_between_are_set != 0)
2174 return 1;
2175
2176 if ((highest_bit_set - lowest_bit_set) < 21)
2177 return 1;
2178
2179 return 0;
2180 }
2181
2182 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2183 unsigned HOST_WIDE_INT,
2184 int, int);
2185
2186 static unsigned HOST_WIDE_INT
2187 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2188 unsigned HOST_WIDE_INT low_bits,
2189 int lowest_bit_set, int shift)
2190 {
2191 HOST_WIDE_INT hi, lo;
2192
2193 if (lowest_bit_set < 32)
2194 {
2195 lo = (low_bits >> lowest_bit_set) << shift;
2196 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2197 }
2198 else
2199 {
2200 lo = 0;
2201 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2202 }
2203 if (hi & lo)
2204 abort ();
2205 return (hi | lo);
2206 }
2207
2208 /* Here we are sure to be arch64 and this is an integer constant
2209 being loaded into a register. Emit the most efficient
2210 insn sequence possible. Detection of all the 1-insn cases
2211 has been done already. */
2212 void
2213 sparc_emit_set_const64 (rtx op0, rtx op1)
2214 {
2215 unsigned HOST_WIDE_INT high_bits, low_bits;
2216 int lowest_bit_set, highest_bit_set;
2217 int all_bits_between_are_set;
2218 rtx temp = 0;
2219
2220 /* Sanity check that we know what we are working with. */
2221 if (! TARGET_ARCH64)
2222 abort ();
2223
2224 if (GET_CODE (op0) != SUBREG)
2225 {
2226 if (GET_CODE (op0) != REG
2227 || (REGNO (op0) >= SPARC_FIRST_FP_REG
2228 && REGNO (op0) <= SPARC_LAST_V9_FP_REG))
2229 abort ();
2230 }
2231
2232 if (reload_in_progress || reload_completed)
2233 temp = op0;
2234
2235 if (GET_CODE (op1) != CONST_DOUBLE
2236 && GET_CODE (op1) != CONST_INT)
2237 {
2238 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2239 return;
2240 }
2241
2242 if (! temp)
2243 temp = gen_reg_rtx (DImode);
2244
2245 if (GET_CODE (op1) == CONST_DOUBLE)
2246 {
2247 #if HOST_BITS_PER_WIDE_INT == 64
2248 high_bits = (CONST_DOUBLE_LOW (op1) >> 32) & 0xffffffff;
2249 low_bits = CONST_DOUBLE_LOW (op1) & 0xffffffff;
2250 #else
2251 high_bits = CONST_DOUBLE_HIGH (op1);
2252 low_bits = CONST_DOUBLE_LOW (op1);
2253 #endif
2254 }
2255 else
2256 {
2257 #if HOST_BITS_PER_WIDE_INT == 64
2258 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2259 low_bits = (INTVAL (op1) & 0xffffffff);
2260 #else
2261 high_bits = ((INTVAL (op1) < 0) ?
2262 0xffffffff :
2263 0x00000000);
2264 low_bits = INTVAL (op1);
2265 #endif
2266 }
2267
2268 /* low_bits bits 0 --> 31
2269 high_bits bits 32 --> 63 */
2270
2271 analyze_64bit_constant (high_bits, low_bits,
2272 &highest_bit_set, &lowest_bit_set,
2273 &all_bits_between_are_set);
2274
2275 /* First try for a 2-insn sequence. */
2276
2277 /* These situations are preferred because the optimizer can
2278 * do more things with them:
2279 * 1) mov -1, %reg
2280 * sllx %reg, shift, %reg
2281 * 2) mov -1, %reg
2282 * srlx %reg, shift, %reg
2283 * 3) mov some_small_const, %reg
2284 * sllx %reg, shift, %reg
2285 */
2286 if (((highest_bit_set == 63
2287 || lowest_bit_set == 0)
2288 && all_bits_between_are_set != 0)
2289 || ((highest_bit_set - lowest_bit_set) < 12))
2290 {
2291 HOST_WIDE_INT the_const = -1;
2292 int shift = lowest_bit_set;
2293
2294 if ((highest_bit_set != 63
2295 && lowest_bit_set != 0)
2296 || all_bits_between_are_set == 0)
2297 {
2298 the_const =
2299 create_simple_focus_bits (high_bits, low_bits,
2300 lowest_bit_set, 0);
2301 }
2302 else if (lowest_bit_set == 0)
2303 shift = -(63 - highest_bit_set);
2304
2305 if (! SPARC_SIMM13_P (the_const))
2306 abort ();
2307
2308 emit_insn (gen_safe_SET64 (temp, the_const));
2309 if (shift > 0)
2310 emit_insn (gen_rtx_SET (VOIDmode,
2311 op0,
2312 gen_rtx_ASHIFT (DImode,
2313 temp,
2314 GEN_INT (shift))));
2315 else if (shift < 0)
2316 emit_insn (gen_rtx_SET (VOIDmode,
2317 op0,
2318 gen_rtx_LSHIFTRT (DImode,
2319 temp,
2320 GEN_INT (-shift))));
2321 else
2322 abort ();
2323 return;
2324 }
2325
2326 /* Now a range of 22 or fewer bits set somewhere.
2327 * 1) sethi %hi(focus_bits), %reg
2328 * sllx %reg, shift, %reg
2329 * 2) sethi %hi(focus_bits), %reg
2330 * srlx %reg, shift, %reg
2331 */
2332 if ((highest_bit_set - lowest_bit_set) < 21)
2333 {
2334 unsigned HOST_WIDE_INT focus_bits =
2335 create_simple_focus_bits (high_bits, low_bits,
2336 lowest_bit_set, 10);
2337
2338 if (! SPARC_SETHI_P (focus_bits))
2339 abort ();
2340
2341 sparc_emit_set_safe_HIGH64 (temp, focus_bits);
2342
2343 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2344 if (lowest_bit_set < 10)
2345 emit_insn (gen_rtx_SET (VOIDmode,
2346 op0,
2347 gen_rtx_LSHIFTRT (DImode, temp,
2348 GEN_INT (10 - lowest_bit_set))));
2349 else if (lowest_bit_set > 10)
2350 emit_insn (gen_rtx_SET (VOIDmode,
2351 op0,
2352 gen_rtx_ASHIFT (DImode, temp,
2353 GEN_INT (lowest_bit_set - 10))));
2354 else
2355 abort ();
2356 return;
2357 }
2358
2359 /* 1) sethi %hi(low_bits), %reg
2360 * or %reg, %lo(low_bits), %reg
2361 * 2) sethi %hi(~low_bits), %reg
2362 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2363 */
2364 if (high_bits == 0
2365 || high_bits == 0xffffffff)
2366 {
2367 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2368 (high_bits == 0xffffffff));
2369 return;
2370 }
2371
2372 /* Now, try 3-insn sequences. */
2373
2374 /* 1) sethi %hi(high_bits), %reg
2375 * or %reg, %lo(high_bits), %reg
2376 * sllx %reg, 32, %reg
2377 */
2378 if (low_bits == 0)
2379 {
2380 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2381 return;
2382 }
2383
2384 /* We may be able to do something quick
2385 when the constant is negated, so try that. */
2386 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2387 (~low_bits) & 0xfffffc00))
2388 {
2389 /* NOTE: The trailing bits get XOR'd so we need the
2390 non-negated bits, not the negated ones. */
2391 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2392
2393 if ((((~high_bits) & 0xffffffff) == 0
2394 && ((~low_bits) & 0x80000000) == 0)
2395 || (((~high_bits) & 0xffffffff) == 0xffffffff
2396 && ((~low_bits) & 0x80000000) != 0))
2397 {
2398 int fast_int = (~low_bits & 0xffffffff);
2399
2400 if ((SPARC_SETHI_P (fast_int)
2401 && (~high_bits & 0xffffffff) == 0)
2402 || SPARC_SIMM13_P (fast_int))
2403 emit_insn (gen_safe_SET64 (temp, fast_int));
2404 else
2405 sparc_emit_set_const64 (temp, GEN_INT64 (fast_int));
2406 }
2407 else
2408 {
2409 rtx negated_const;
2410 #if HOST_BITS_PER_WIDE_INT == 64
2411 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2412 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2413 #else
2414 negated_const = immed_double_const ((~low_bits) & 0xfffffc00,
2415 (~high_bits) & 0xffffffff,
2416 DImode);
2417 #endif
2418 sparc_emit_set_const64 (temp, negated_const);
2419 }
2420
2421 /* If we are XOR'ing with -1, then we should emit a one's complement
2422 instead. This way the combiner will notice logical operations
2423 such as ANDN later on and substitute. */
2424 if (trailing_bits == 0x3ff)
2425 {
2426 emit_insn (gen_rtx_SET (VOIDmode, op0,
2427 gen_rtx_NOT (DImode, temp)));
2428 }
2429 else
2430 {
2431 emit_insn (gen_rtx_SET (VOIDmode,
2432 op0,
2433 gen_safe_XOR64 (temp,
2434 (-0x400 | trailing_bits))));
2435 }
2436 return;
2437 }
2438
2439 /* 1) sethi %hi(xxx), %reg
2440 * or %reg, %lo(xxx), %reg
2441 * sllx %reg, yyy, %reg
2442 *
2443 * ??? This is just a generalized version of the low_bits==0
2444 * thing above, FIXME...
2445 */
2446 if ((highest_bit_set - lowest_bit_set) < 32)
2447 {
2448 unsigned HOST_WIDE_INT focus_bits =
2449 create_simple_focus_bits (high_bits, low_bits,
2450 lowest_bit_set, 0);
2451
2452 /* We can't get here in this state. */
2453 if (highest_bit_set < 32
2454 || lowest_bit_set >= 32)
2455 abort ();
2456
2457 /* So what we know is that the set bits straddle the
2458 middle of the 64-bit word. */
2459 sparc_emit_set_const64_quick2 (op0, temp,
2460 focus_bits, 0,
2461 lowest_bit_set);
2462 return;
2463 }
2464
2465 /* 1) sethi %hi(high_bits), %reg
2466 * or %reg, %lo(high_bits), %reg
2467 * sllx %reg, 32, %reg
2468 * or %reg, low_bits, %reg
2469 */
2470 if (SPARC_SIMM13_P(low_bits)
2471 && ((int)low_bits > 0))
2472 {
2473 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2474 return;
2475 }
2476
2477 /* The easiest way when all else fails is full decomposition. */
2478 #if 0
2479 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
2480 high_bits, low_bits, ~high_bits, ~low_bits);
2481 #endif
2482 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2483 }
2484
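/* Worked example (editorial sketch) of the 2-insn path above: a value
   whose set bits span fewer than 12 positions reduces to mov; sllx.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t c = 0x00001f8000000000ULL;               /* arbitrary sample */
  int shift = 39;                                   /* lowest set bit */
  int64_t the_const = (int64_t) (c >> shift);       /* 0x3f */
  assert (the_const >= -4096 && the_const < 4096);  /* SPARC_SIMM13_P */
  assert (((uint64_t) the_const << shift) == c);    /* mov 0x3f; sllx 39 */
  return 0;
}
#endif
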
2485 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2486 return the mode to be used for the comparison. For floating-point,
2487 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2488 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2489 processing is needed. */
2490
2491 enum machine_mode
2492 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2493 {
2494 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2495 {
2496 switch (op)
2497 {
2498 case EQ:
2499 case NE:
2500 case UNORDERED:
2501 case ORDERED:
2502 case UNLT:
2503 case UNLE:
2504 case UNGT:
2505 case UNGE:
2506 case UNEQ:
2507 case LTGT:
2508 return CCFPmode;
2509
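	/* These relational comparisons must signal on an unordered
	   (NaN) operand per IEEE, hence the trapping compare mode.  */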
2510 case LT:
2511 case LE:
2512 case GT:
2513 case GE:
2514 return CCFPEmode;
2515
2516 default:
2517 abort ();
2518 }
2519 }
2520 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2521 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2522 {
2523 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2524 return CCX_NOOVmode;
2525 else
2526 return CC_NOOVmode;
2527 }
2528 else
2529 {
2530 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2531 return CCXmode;
2532 else
2533 return CCmode;
2534 }
2535 }
2536
2537 /* X and Y are two things to compare using CODE. Emit the compare insn and
2538 return the rtx for the cc reg in the proper mode. */
2539
2540 rtx
2541 gen_compare_reg (enum rtx_code code, rtx x, rtx y)
2542 {
2543 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2544 rtx cc_reg;
2545
2546 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2547 fcc regs (cse can't tell they're really call clobbered regs and will
2548 remove a duplicate comparison even if there is an intervening function
2549 call - it will then try to reload the cc reg via an int reg which is why
2550 we need the movcc patterns). It is possible to provide the movcc
2551 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2552 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2553 to tell cse that CCFPE mode registers (even pseudos) are call
2554 clobbered. */
2555
2556 /* ??? This is an experiment. Rather than making changes to cse which may
2557 or may not be easy/clean, we do our own cse. This is possible because
2558 we will generate hard registers. Cse knows they're call clobbered (it
2559 doesn't know the same thing about pseudos). If we guess wrong, no big
2560 deal, but if we win, great! */
2561
2562 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2563 #if 1 /* experiment */
2564 {
2565 int reg;
2566 /* We cycle through the registers to ensure they're all exercised. */
2567 static int next_fcc_reg = 0;
2568 /* Previous x,y for each fcc reg. */
2569 static rtx prev_args[4][2];
2570
2571 /* Scan prev_args for x,y. */
2572 for (reg = 0; reg < 4; reg++)
2573 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2574 break;
2575 if (reg == 4)
2576 {
2577 reg = next_fcc_reg;
2578 prev_args[reg][0] = x;
2579 prev_args[reg][1] = y;
2580 next_fcc_reg = (next_fcc_reg + 1) & 3;
2581 }
2582 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2583 }
2584 #else
2585 cc_reg = gen_reg_rtx (mode);
2586 #endif /* ! experiment */
2587 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2588 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2589 else
2590 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2591
2592 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2593 gen_rtx_COMPARE (mode, x, y)));
2594
2595 return cc_reg;
2596 }
2597
2598 /* This function is used for v9 only.
2599 CODE is the code for an Scc's comparison.
2600 OPERANDS[0] is the target of the Scc insn.
2601 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2602 been generated yet).
2603
2604 This function is needed to turn
2605
2606 (set (reg:SI 110)
2607 (gt (reg:CCX 100 %icc)
2608 (const_int 0)))
2609 into
2610 (set (reg:SI 110)
2611 (gt:DI (reg:CCX 100 %icc)
2612 (const_int 0)))
2613
2614 I.e., the instruction recognizer needs to see the mode of the comparison to
2615 find the right instruction. We could use "gt:DI" right in the
2616 define_expand, but leaving it out allows us to handle DI, SI, etc.
2617
2618 We refer to the global sparc compare operands sparc_compare_op0 and
2619 sparc_compare_op1. */
2620
2621 int
2622 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2623 {
2624 rtx temp, op0, op1;
2625
2626 if (! TARGET_ARCH64
2627 && (GET_MODE (sparc_compare_op0) == DImode
2628 || GET_MODE (operands[0]) == DImode))
2629 return 0;
2630
2631 op0 = sparc_compare_op0;
2632 op1 = sparc_compare_op1;
2633
2634 /* Try to use the movrCC insns. */
2635 if (TARGET_ARCH64
2636 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
2637 && op1 == const0_rtx
2638 && v9_regcmp_p (compare_code))
2639 {
2640 /* Special case for op0 != 0. This can be done with one instruction if
2641 operands[0] == sparc_compare_op0. */
2642
2643 if (compare_code == NE
2644 && GET_MODE (operands[0]) == DImode
2645 && rtx_equal_p (op0, operands[0]))
2646 {
2647 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2648 gen_rtx_IF_THEN_ELSE (DImode,
2649 gen_rtx_fmt_ee (compare_code, DImode,
2650 op0, const0_rtx),
2651 const1_rtx,
2652 operands[0])));
2653 return 1;
2654 }
2655
2656 if (reg_overlap_mentioned_p (operands[0], op0))
2657 {
2658 /* Handle the case where operands[0] == sparc_compare_op0.
2659 We "early clobber" the result. */
2660 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2661 emit_move_insn (op0, sparc_compare_op0);
2662 }
2663
2664 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2665 if (GET_MODE (op0) != DImode)
2666 {
2667 temp = gen_reg_rtx (DImode);
2668 convert_move (temp, op0, 0);
2669 }
2670 else
2671 temp = op0;
2672 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2673 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2674 gen_rtx_fmt_ee (compare_code, DImode,
2675 temp, const0_rtx),
2676 const1_rtx,
2677 operands[0])));
2678 return 1;
2679 }
2680 else
2681 {
2682 operands[1] = gen_compare_reg (compare_code, op0, op1);
2683
2684 switch (GET_MODE (operands[1]))
2685 {
2686 case CCmode :
2687 case CCXmode :
2688 case CCFPEmode :
2689 case CCFPmode :
2690 break;
2691 default :
2692 abort ();
2693 }
2694 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2695 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2696 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2697 gen_rtx_fmt_ee (compare_code,
2698 GET_MODE (operands[1]),
2699 operands[1], const0_rtx),
2700 const1_rtx, operands[0])));
2701 return 1;
2702 }
2703 }
2704
2705 /* Emit a conditional jump insn for the v9 architecture using comparison code
2706 CODE and jump target LABEL.
2707 This function exists to take advantage of the v9 brxx insns. */
2708
2709 void
2710 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2711 {
2712 emit_jump_insn (gen_rtx_SET (VOIDmode,
2713 pc_rtx,
2714 gen_rtx_IF_THEN_ELSE (VOIDmode,
2715 gen_rtx_fmt_ee (code, GET_MODE (op0),
2716 op0, const0_rtx),
2717 gen_rtx_LABEL_REF (VOIDmode, label),
2718 pc_rtx)));
2719 }
2720
2721 /* Generate a DFmode part of a hard TFmode register.
2722 REG is the TFmode hard register, LOW is 1 for the
2723 low 64 bits of the register and 0 otherwise. */
2725 rtx
2726 gen_df_reg (rtx reg, int low)
2727 {
2728 int regno = REGNO (reg);
2729
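  /* For example, a TFmode value in %f0 (regno 32): on this big-endian
     target the high DFmode half is %f0 and the low half is %f2.  For
     TFmode in integer regs the step is one register on arch64 (64-bit
     regs) but two on arch32.  */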
2730 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2731 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2732 return gen_rtx_REG (DFmode, regno);
2733 }
2734 \f
2735 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2736 Unlike normal calls, TFmode operands are passed by reference. It is
2737 assumed that no more than 3 operands are required. */
2738
2739 static void
2740 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2741 {
2742 rtx ret_slot = NULL, arg[3], func_sym;
2743 int i;
2744
2745 /* We only expect to be called for conversions, unary, and binary ops. */
2746 if (nargs < 2 || nargs > 3)
2747 abort ();
2748
2749 for (i = 0; i < nargs; ++i)
2750 {
2751 rtx this_arg = operands[i];
2752 rtx this_slot;
2753
2754 /* TFmode arguments and return values are passed by reference. */
2755 if (GET_MODE (this_arg) == TFmode)
2756 {
2757 int force_stack_temp;
2758
2759 force_stack_temp = 0;
2760 if (TARGET_BUGGY_QP_LIB && i == 0)
2761 force_stack_temp = 1;
2762
2763 if (GET_CODE (this_arg) == MEM
2764 && ! force_stack_temp)
2765 this_arg = XEXP (this_arg, 0);
2766 else if (CONSTANT_P (this_arg)
2767 && ! force_stack_temp)
2768 {
2769 this_slot = force_const_mem (TFmode, this_arg);
2770 this_arg = XEXP (this_slot, 0);
2771 }
2772 else
2773 {
2774 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2775
2776 /* Operand 0 is the return value. We'll copy it out later. */
2777 if (i > 0)
2778 emit_move_insn (this_slot, this_arg);
2779 else
2780 ret_slot = this_slot;
2781
2782 this_arg = XEXP (this_slot, 0);
2783 }
2784 }
2785
2786 arg[i] = this_arg;
2787 }
2788
2789 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2790
2791 if (GET_MODE (operands[0]) == TFmode)
2792 {
2793 if (nargs == 2)
2794 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2795 arg[0], GET_MODE (arg[0]),
2796 arg[1], GET_MODE (arg[1]));
2797 else
2798 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2799 arg[0], GET_MODE (arg[0]),
2800 arg[1], GET_MODE (arg[1]),
2801 arg[2], GET_MODE (arg[2]));
2802
2803 if (ret_slot)
2804 emit_move_insn (operands[0], ret_slot);
2805 }
2806 else
2807 {
2808 rtx ret;
2809
2810 if (nargs != 2)
2811 abort ();
2812
2813 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2814 GET_MODE (operands[0]), 1,
2815 arg[1], GET_MODE (arg[1]));
2816
2817 if (ret != operands[0])
2818 emit_move_insn (operands[0], ret);
2819 }
2820 }
2821
2822 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2823
2824 static void
2825 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2826 {
2827 const char *func;
2828
2829 switch (code)
2830 {
2831 case PLUS:
2832 func = "_Qp_add";
2833 break;
2834 case MINUS:
2835 func = "_Qp_sub";
2836 break;
2837 case MULT:
2838 func = "_Qp_mul";
2839 break;
2840 case DIV:
2841 func = "_Qp_div";
2842 break;
2843 default:
2844 abort ();
2845 }
2846
2847 emit_soft_tfmode_libcall (func, 3, operands);
2848 }
2849
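/* For reference (editorial illustration, not emitted verbatim): with
   software quad floats the binop above turns a TFmode `c = a + b' into
   a by-reference libcall, roughly

	long double a, b, c;
	_Qp_add (&c, &a, &b);

   _Qp_sub, _Qp_mul and _Qp_div follow the same shape, per the SPARC
   ABI names selected above.  */
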
2850 static void
2851 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2852 {
2853 const char *func;
2854
2855 switch (code)
2856 {
2857 case SQRT:
2858 func = "_Qp_sqrt";
2859 break;
2860 default:
2861 abort ();
2862 }
2863
2864 emit_soft_tfmode_libcall (func, 2, operands);
2865 }
2866
2867 static void
2868 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2869 {
2870 const char *func;
2871
2872 switch (code)
2873 {
2874 case FLOAT_EXTEND:
2875 switch (GET_MODE (operands[1]))
2876 {
2877 case SFmode:
2878 func = "_Qp_stoq";
2879 break;
2880 case DFmode:
2881 func = "_Qp_dtoq";
2882 break;
2883 default:
2884 abort ();
2885 }
2886 break;
2887
2888 case FLOAT_TRUNCATE:
2889 switch (GET_MODE (operands[0]))
2890 {
2891 case SFmode:
2892 func = "_Qp_qtos";
2893 break;
2894 case DFmode:
2895 func = "_Qp_qtod";
2896 break;
2897 default:
2898 abort ();
2899 }
2900 break;
2901
2902 case FLOAT:
2903 switch (GET_MODE (operands[1]))
2904 {
2905 case SImode:
2906 func = "_Qp_itoq";
2907 break;
2908 case DImode:
2909 func = "_Qp_xtoq";
2910 break;
2911 default:
2912 abort ();
2913 }
2914 break;
2915
2916 case UNSIGNED_FLOAT:
2917 switch (GET_MODE (operands[1]))
2918 {
2919 case SImode:
2920 func = "_Qp_uitoq";
2921 break;
2922 case DImode:
2923 func = "_Qp_uxtoq";
2924 break;
2925 default:
2926 abort ();
2927 }
2928 break;
2929
2930 case FIX:
2931 switch (GET_MODE (operands[0]))
2932 {
2933 case SImode:
2934 func = "_Qp_qtoi";
2935 break;
2936 case DImode:
2937 func = "_Qp_qtox";
2938 break;
2939 default:
2940 abort ();
2941 }
2942 break;
2943
2944 case UNSIGNED_FIX:
2945 switch (GET_MODE (operands[0]))
2946 {
2947 case SImode:
2948 func = "_Qp_qtoui";
2949 break;
2950 case DImode:
2951 func = "_Qp_qtoux";
2952 break;
2953 default:
2954 abort ();
2955 }
2956 break;
2957
2958 default:
2959 abort ();
2960 }
2961
2962 emit_soft_tfmode_libcall (func, 2, operands);
2963 }
2964
2965 /* Expand a hard-float TFmode operation. All arguments must be in
2966 registers. */
2967
2968 static void
2969 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2970 {
2971 rtx op, dest;
2972
2973 if (GET_RTX_CLASS (code) == RTX_UNARY)
2974 {
2975 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2976 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2977 }
2978 else
2979 {
2980 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2981 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2982 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2983 operands[1], operands[2]);
2984 }
2985
2986 if (register_operand (operands[0], VOIDmode))
2987 dest = operands[0];
2988 else
2989 dest = gen_reg_rtx (GET_MODE (operands[0]));
2990
2991 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2992
2993 if (dest != operands[0])
2994 emit_move_insn (operands[0], dest);
2995 }
2996
2997 void
2998 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2999 {
3000 if (TARGET_HARD_QUAD)
3001 emit_hard_tfmode_operation (code, operands);
3002 else
3003 emit_soft_tfmode_binop (code, operands);
3004 }
3005
3006 void
3007 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3008 {
3009 if (TARGET_HARD_QUAD)
3010 emit_hard_tfmode_operation (code, operands);
3011 else
3012 emit_soft_tfmode_unop (code, operands);
3013 }
3014
3015 void
3016 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3017 {
3018 if (TARGET_HARD_QUAD)
3019 emit_hard_tfmode_operation (code, operands);
3020 else
3021 emit_soft_tfmode_cvt (code, operands);
3022 }
3023 \f
3024 /* Return nonzero if a branch/jump/call instruction will be emitting
3025 a nop into its delay slot. */
3026
3027 int
3028 empty_delay_slot (rtx insn)
3029 {
3030 rtx seq;
3031
3032 /* If no previous instruction (should not happen), return true. */
3033 if (PREV_INSN (insn) == NULL)
3034 return 1;
3035
3036 seq = NEXT_INSN (PREV_INSN (insn));
3037 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3038 return 0;
3039
3040 return 1;
3041 }
3042
3043 /* Return nonzero if TRIAL can go into the call delay slot. */
3044
3045 int
3046 tls_call_delay (rtx trial)
3047 {
3048 rtx pat, unspec;
3049
3050 /* Binutils allows
3051 call __tls_get_addr, %tgd_call (foo)
3052 add %l7, %o0, %o0, %tgd_add (foo)
3053 while Sun as/ld does not. */
3054 if (TARGET_GNU_TLS || !TARGET_TLS)
3055 return 1;
3056
3057 pat = PATTERN (trial);
3058 if (GET_CODE (pat) != SET || GET_CODE (SET_SRC (pat)) != PLUS)
3059 return 1;
3060 
3061 unspec = XEXP (SET_SRC (pat), 1);
3062 if (GET_CODE (unspec) != UNSPEC
3063 || (XINT (unspec, 1) != UNSPEC_TLSGD
3064 && XINT (unspec, 1) != UNSPEC_TLSLDM))
3065 return 1;
3066
3067 return 0;
3068 }
3069
3070 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3071 instruction. RETURN_P is true if the v9 variant 'return' is to be
3072 considered in the test too.
3073
3074 TRIAL must be a SET whose destination is a REG appropriate for the
3075 'restore' instruction or, if RETURN_P is true, for the 'return'
3076 instruction. */
3077
3078 static int
3079 eligible_for_restore_insn (rtx trial, bool return_p)
3080 {
3081 rtx pat = PATTERN (trial);
3082 rtx src = SET_SRC (pat);
3083
3084 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3085 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3086 && arith_operand (src, GET_MODE (src)))
3087 {
3088 if (TARGET_ARCH64)
3089 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3090 else
3091 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3092 }
3093
3094 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3095 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3096 && arith_double_operand (src, GET_MODE (src)))
3097 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3098
3099 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3100 else if (! TARGET_FPU && register_operand (src, SFmode))
3101 return 1;
3102
3103 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3104 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3105 return 1;
3106
3107 /* If we have the 'return' instruction, anything that does not use
3108 local or output registers and can go into a delay slot wins. */
3109 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
3110 && (get_attr_in_uncond_branch_delay (trial)
3111 == IN_UNCOND_BRANCH_DELAY_TRUE))
3112 return 1;
3113
3114 /* The 'restore src1,src2,dest' pattern for SImode. */
3115 else if (GET_CODE (src) == PLUS
3116 && register_operand (XEXP (src, 0), SImode)
3117 && arith_operand (XEXP (src, 1), SImode))
3118 return 1;
3119
3120 /* The 'restore src1,src2,dest' pattern for DImode. */
3121 else if (GET_CODE (src) == PLUS
3122 && register_operand (XEXP (src, 0), DImode)
3123 && arith_double_operand (XEXP (src, 1), DImode))
3124 return 1;
3125
3126 /* The 'restore src1,%lo(src2),dest' pattern. */
3127 else if (GET_CODE (src) == LO_SUM
3128 && ! TARGET_CM_MEDMID
3129 && ((register_operand (XEXP (src, 0), SImode)
3130 && immediate_operand (XEXP (src, 1), SImode))
3131 || (TARGET_ARCH64
3132 && register_operand (XEXP (src, 0), DImode)
3133 && immediate_operand (XEXP (src, 1), DImode))))
3134 return 1;
3135
3136 /* The 'restore src,src,dest' pattern. */
3137 else if (GET_CODE (src) == ASHIFT
3138 && (register_operand (XEXP (src, 0), SImode)
3139 || register_operand (XEXP (src, 0), DImode))
3140 && XEXP (src, 1) == const1_rtx)
3141 return 1;
3142
3143 return 0;
3144 }
3145
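/* A classic example of an insn this accepts: for `return f () + 1;'
   the final addition can ride the register window restore, so the
   epilogue becomes `ret; restore %o0, 1, %o0' (the sources are read in
   the callee's window, %o0 is written in the caller's).  */
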
3146 /* Return nonzero if TRIAL can go into the function return's
3147 delay slot. */
3148
3149 int
3150 eligible_for_return_delay (rtx trial)
3151 {
3152 rtx pat;
3153
3154 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3155 return 0;
3156
3157 if (get_attr_length (trial) != 1)
3158 return 0;
3159
3160 /* If there are any call-saved registers, we should scan TRIAL to
3161 check that it does not reference them. For now just be conservative. */
3162 if (num_gfregs)
3163 return 0;
3164
3165 /* If the function uses __builtin_eh_return, the eh_return machinery
3166 occupies the delay slot. */
3167 if (current_function_calls_eh_return)
3168 return 0;
3169
3170 /* In the case of a true leaf function, anything can go into the slot. */
3171 if (sparc_leaf_function_p)
3172 return get_attr_in_uncond_branch_delay (trial)
3173 == IN_UNCOND_BRANCH_DELAY_TRUE;
3174
3175 pat = PATTERN (trial);
3176
3177 /* Otherwise, only operations which can be done in tandem with
3178 a `restore' or `return' insn can go into the delay slot. */
3179 if (GET_CODE (SET_DEST (pat)) != REG
3180 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
3181 return 0;
3182
3183 /* If this instruction sets a floating point register and we have a
3184 'return' instruction, it can probably go in. But 'restore' will
3185 not work with FP_REGS. */
3186 if (REGNO (SET_DEST (pat)) >= 32)
3187 return (TARGET_V9
3188 && ! epilogue_renumber (&pat, 1)
3189 && (get_attr_in_uncond_branch_delay (trial)
3190 == IN_UNCOND_BRANCH_DELAY_TRUE));
3191
3192 return eligible_for_restore_insn (trial, true);
3193 }
3194
3195 /* Return nonzero if TRIAL can go into the sibling call's
3196 delay slot. */
3197
3198 int
3199 eligible_for_sibcall_delay (rtx trial)
3200 {
3201 rtx pat;
3202
3203 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3204 return 0;
3205
3206 if (get_attr_length (trial) != 1)
3207 return 0;
3208
3209 pat = PATTERN (trial);
3210
3211 if (sparc_leaf_function_p)
3212 {
3213 /* If the tail call is done using the call instruction,
3214 we have to restore %o7 in the delay slot. */
3215 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3216 return 0;
3217
3218 /* %g1 is used to build the function address. */
3219 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3220 return 0;
3221
3222 return 1;
3223 }
3224
3225 /* Otherwise, only operations which can be done in tandem with
3226 a `restore' insn can go into the delay slot. */
3227 if (GET_CODE (SET_DEST (pat)) != REG
3228 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3229 || REGNO (SET_DEST (pat)) >= 32)
3230 return 0;
3231
3232 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3233 in most cases. */
3234 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3235 return 0;
3236
3237 return eligible_for_restore_insn (trial, false);
3238 }
3239
3240 int
3241 short_branch (int uid1, int uid2)
3242 {
3243 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
3244
3245 /* Leave a few words of "slop". */
3246 if (delta >= -1023 && delta <= 1022)
3247 return 1;
3248
3249 return 0;
3250 }
3251
3252 /* Return nonzero if REG is not used after INSN.
3253 We assume REG is a reload reg, and therefore does
3254 not live past labels or calls or jumps. */
3255 int
3256 reg_unused_after (rtx reg, rtx insn)
3257 {
3258 enum rtx_code code, prev_code = UNKNOWN;
3259
3260 while ((insn = NEXT_INSN (insn)))
3261 {
3262 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
3263 return 1;
3264
3265 code = GET_CODE (insn);
3266 if (GET_CODE (insn) == CODE_LABEL)
3267 return 1;
3268
3269 if (INSN_P (insn))
3270 {
3271 rtx set = single_set (insn);
3272 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
3273 if (set && in_src)
3274 return 0;
3275 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
3276 return 1;
3277 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
3278 return 0;
3279 }
3280 prev_code = code;
3281 }
3282 return 1;
3283 }
3284 \f
3285 /* Determine if it's legal to put X into the constant pool. This
3286 is not possible if X contains the address of a symbol that is
3287 not constant (TLS) or not known at final link time (PIC). */
3288
3289 static bool
3290 sparc_cannot_force_const_mem (rtx x)
3291 {
3292 switch (GET_CODE (x))
3293 {
3294 case CONST_INT:
3295 case CONST_DOUBLE:
3296 /* Accept all non-symbolic constants. */
3297 return false;
3298
3299 case LABEL_REF:
3300 /* Labels are OK iff we are non-PIC. */
3301 return flag_pic != 0;
3302
3303 case SYMBOL_REF:
3304 /* 'Naked' TLS symbol references are never OK,
3305 non-TLS symbols are OK iff we are non-PIC. */
3306 if (SYMBOL_REF_TLS_MODEL (x))
3307 return true;
3308 else
3309 return flag_pic != 0;
3310
3311 case CONST:
3312 return sparc_cannot_force_const_mem (XEXP (x, 0));
3313 case PLUS:
3314 case MINUS:
3315 return sparc_cannot_force_const_mem (XEXP (x, 0))
3316 || sparc_cannot_force_const_mem (XEXP (x, 1));
3317 case UNSPEC:
3318 return true;
3319 default:
3320 abort ();
3321 }
3322 }
3323 \f
3324 /* The table we use to reference PIC data. */
3325 static GTY(()) rtx global_offset_table;
3326
3327 /* The function we use to get at it. */
3328 static GTY(()) rtx add_pc_to_pic_symbol;
3329 static GTY(()) char add_pc_to_pic_symbol_name[256];
3330
3331 /* Ensure that we are not using patterns that are not OK with PIC. */
3332
3333 int
3334 check_pic (int i)
3335 {
3336 switch (flag_pic)
3337 {
3338 case 1:
3339 if (GET_CODE (recog_data.operand[i]) == SYMBOL_REF
3340 || (GET_CODE (recog_data.operand[i]) == CONST
3341 && ! (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
3342 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
3343 == global_offset_table)
3344 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
3345 == CONST))))
3346 abort ();
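      /* FALLTHRU */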
3347 case 2:
3348 default:
3349 return 1;
3350 }
3351 }
3352
3353 /* Return true if X is an address which needs a temporary register when
3354 reloaded while generating PIC code. */
3355
3356 int
3357 pic_address_needs_scratch (rtx x)
3358 {
3359 /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg. */
3360 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3361 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3362 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3363 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3364 return 1;
3365
3366 return 0;
3367 }
3368
3369 /* Determine if a given RTX is a valid constant. We already know this
3370 satisfies CONSTANT_P. */
3371
3372 bool
3373 legitimate_constant_p (rtx x)
3374 {
3375 rtx inner;
3376
3377 switch (GET_CODE (x))
3378 {
3379 case SYMBOL_REF:
3380 /* TLS symbols are not constant. */
3381 if (SYMBOL_REF_TLS_MODEL (x))
3382 return false;
3383 break;
3384
3385 case CONST:
3386 inner = XEXP (x, 0);
3387
3388 /* Offsets of TLS symbols are never valid.
3389 Discourage CSE from creating them. */
3390 if (GET_CODE (inner) == PLUS
3391 && tls_symbolic_operand (XEXP (inner, 0)))
3392 return false;
3393 break;
3394
3395 case CONST_DOUBLE:
3396 if (GET_MODE (x) == VOIDmode)
3397 return true;
3398
3399 /* Floating point constants are generally not ok.
3400 The only exception is 0.0 in VIS. */
3401 if (TARGET_VIS
3402 && (GET_MODE (x) == SFmode
3403 || GET_MODE (x) == DFmode
3404 || GET_MODE (x) == TFmode)
3405 && fp_zero_operand (x, GET_MODE (x)))
3406 return true;
3407
3408 return false;
3409
3410 default:
3411 break;
3412 }
3413
3414 return true;
3415 }
3416
3417 /* Determine if a given RTX is a valid constant address. */
3418
3419 bool
3420 constant_address_p (rtx x)
3421 {
3422 switch (GET_CODE (x))
3423 {
3424 case LABEL_REF:
3425 case CONST_INT:
3426 case HIGH:
3427 return true;
3428
3429 case CONST:
3430 if (flag_pic && pic_address_needs_scratch (x))
3431 return false;
3432 return legitimate_constant_p (x);
3433
3434 case SYMBOL_REF:
3435 return !flag_pic && legitimate_constant_p (x);
3436
3437 default:
3438 return false;
3439 }
3440 }
3441
3442 /* Nonzero if the constant value X is a legitimate general operand
3443 when generating PIC code. It is given that flag_pic is on and
3444 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3445
3446 bool
3447 legitimate_pic_operand_p (rtx x)
3448 {
3449 if (pic_address_needs_scratch (x))
3450 return false;
3451 if (tls_symbolic_operand (x)
3452 || (GET_CODE (x) == CONST
3453 && GET_CODE (XEXP (x, 0)) == PLUS
3454 && tls_symbolic_operand (XEXP (XEXP (x, 0), 0))))
3455 return false;
3456 return true;
3457 }
3458
3459 /* Return nonzero if ADDR is a valid memory address.
3460 STRICT specifies whether strict register checking applies. */
3461
3462 int
3463 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
3464 {
3465 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL, imm2;
3466
3467 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3468 rs1 = addr;
3469 else if (GET_CODE (addr) == PLUS)
3470 {
3471 rs1 = XEXP (addr, 0);
3472 rs2 = XEXP (addr, 1);
3473
3474 /* Canonicalize. REG comes first; if there are no regs,
3475 LO_SUM comes first. */
3476 if (!REG_P (rs1)
3477 && GET_CODE (rs1) != SUBREG
3478 && (REG_P (rs2)
3479 || GET_CODE (rs2) == SUBREG
3480 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3481 {
3482 rs1 = XEXP (addr, 1);
3483 rs2 = XEXP (addr, 0);
3484 }
3485
3486 if ((flag_pic == 1
3487 && rs1 == pic_offset_table_rtx
3488 && !REG_P (rs2)
3489 && GET_CODE (rs2) != SUBREG
3490 && GET_CODE (rs2) != LO_SUM
3491 && GET_CODE (rs2) != MEM
3492 && !tls_symbolic_operand (rs2)
3493 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3494 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3495 || ((REG_P (rs1)
3496 || GET_CODE (rs1) == SUBREG)
3497 && RTX_OK_FOR_OFFSET_P (rs2)))
3498 {
3499 imm1 = rs2;
3500 rs2 = NULL;
3501 }
3502 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3503 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3504 {
3505 /* We prohibit REG + REG for TFmode when there are no quad move insns
3506 and we consequently need to split. We do this because REG+REG
3507 is not an offsettable address. If we get the situation in reload
3508 where source and destination of a movtf pattern are both MEMs with
3509 REG+REG address, then only one of them gets converted to an
3510 offsettable address. */
3511 if (mode == TFmode
3512 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3513 return 0;
3514
3515 /* We prohibit REG + REG on ARCH32 if not optimizing for
3516 DFmode/DImode because then mem_min_alignment is likely to be zero
3517 after reload and the forced split would lack a matching splitter
3518 pattern. */
3519 if (TARGET_ARCH32 && !optimize
3520 && (mode == DFmode || mode == DImode))
3521 return 0;
3522 }
3523 else if (USE_AS_OFFSETABLE_LO10
3524 && GET_CODE (rs1) == LO_SUM
3525 && TARGET_ARCH64
3526 && ! TARGET_CM_MEDMID
3527 && RTX_OK_FOR_OLO10_P (rs2))
3528 {
3529 imm2 = rs2;
3530 rs2 = NULL;
3531 imm1 = XEXP (rs1, 1);
3532 rs1 = XEXP (rs1, 0);
3533 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3534 return 0;
3535 }
3536 }
3537 else if (GET_CODE (addr) == LO_SUM)
3538 {
3539 rs1 = XEXP (addr, 0);
3540 imm1 = XEXP (addr, 1);
3541
3542 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3543 return 0;
3544
3545 if (USE_AS_OFFSETABLE_LO10)
3546 {
3547 /* We can't allow TFmode, because an offset greater than or equal to
3548 the alignment (8) may cause the LO_SUM to overflow if !v9. */
3549 if (mode == TFmode && ! TARGET_V9)
3550 return 0;
3551 }
3552 else
3553 {
3554 /* We prohibit LO_SUM for TFmode when there are no quad move insns
3555 and we consequently need to split. We do this because LO_SUM
3556 is not an offsettable address. If we get the situation in reload
3557 where source and destination of a movtf pattern are both MEMs with
3558 LO_SUM address, then only one of them gets converted to an
3559 offsettable address. */
3560 if (mode == TFmode
3561 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3562 return 0;
3563 }
3564 }
3565 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3566 return 1;
3567 else
3568 return 0;
3569
3570 if (GET_CODE (rs1) == SUBREG)
3571 rs1 = SUBREG_REG (rs1);
3572 if (!REG_P (rs1))
3573 return 0;
3574
3575 if (rs2)
3576 {
3577 if (GET_CODE (rs2) == SUBREG)
3578 rs2 = SUBREG_REG (rs2);
3579 if (!REG_P (rs2))
3580 return 0;
3581 }
3582
3583 if (strict)
3584 {
3585 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3586 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3587 return 0;
3588 }
3589 else
3590 {
3591 if ((REGNO (rs1) >= 32
3592 && REGNO (rs1) != FRAME_POINTER_REGNUM
3593 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3594 || (rs2
3595 && (REGNO (rs2) >= 32
3596 && REGNO (rs2) != FRAME_POINTER_REGNUM
3597 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3598 return 0;
3599 }
3600 return 1;
3601 }
3602
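/* To summarize, the forms accepted above are: REG; REG+REG (with the
   TFmode and ARCH32 caveats); REG+SIMM13; the PIC register plus a
   suitable operand; LO_SUM (REG, imm); on arch64, LO_SUM plus a small
   extra offset (OLO10); and bare SMALL_INT addresses.  */
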
3603 /* Construct the SYMBOL_REF for the tls_get_addr function. */
3604
3605 static GTY(()) rtx sparc_tls_symbol;
3606 static rtx
3607 sparc_tls_get_addr (void)
3608 {
3609 if (!sparc_tls_symbol)
3610 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3611
3612 return sparc_tls_symbol;
3613 }
3614
3615 static rtx
3616 sparc_tls_got (void)
3617 {
3618 rtx temp;
3619 if (flag_pic)
3620 {
3621 current_function_uses_pic_offset_table = 1;
3622 return pic_offset_table_rtx;
3623 }
3624
3625 if (!global_offset_table)
3626 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3627 temp = gen_reg_rtx (Pmode);
3628 emit_move_insn (temp, global_offset_table);
3629 return temp;
3630 }
3631
3632
3633 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3634 this (thread-local) address. */
3635
3636 rtx
3637 legitimize_tls_address (rtx addr)
3638 {
3639 rtx temp1, temp2, temp3, ret, o0, got, insn;
3640
3641 if (no_new_pseudos)
3642 abort ();
3643
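  /* For reference (editorial illustration, per the SPARC TLS ABI; the
     exact output depends on the target): the global-dynamic case below
     builds roughly

	sethi	%tgd_hi22(symbol), %temp1
	add	%temp1, %tgd_lo10(symbol), %temp2
	add	%l7, %temp2, %o0, %tgd_add(symbol)
	call	__tls_get_addr, %tgd_call(symbol)

     with the thread-local address returned in %o0.  */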
3644 if (GET_CODE (addr) == SYMBOL_REF)
3645 switch (SYMBOL_REF_TLS_MODEL (addr))
3646 {
3647 case TLS_MODEL_GLOBAL_DYNAMIC:
3648 start_sequence ();
3649 temp1 = gen_reg_rtx (SImode);
3650 temp2 = gen_reg_rtx (SImode);
3651 ret = gen_reg_rtx (Pmode);
3652 o0 = gen_rtx_REG (Pmode, 8);
3653 got = sparc_tls_got ();
3654 emit_insn (gen_tgd_hi22 (temp1, addr));
3655 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3656 if (TARGET_ARCH32)
3657 {
3658 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3659 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3660 addr, const1_rtx));
3661 }
3662 else
3663 {
3664 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3665 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3666 addr, const1_rtx));
3667 }
3668 CALL_INSN_FUNCTION_USAGE (insn)
3669 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3670 CALL_INSN_FUNCTION_USAGE (insn));
3671 insn = get_insns ();
3672 end_sequence ();
3673 emit_libcall_block (insn, ret, o0, addr);
3674 break;
3675
3676 case TLS_MODEL_LOCAL_DYNAMIC:
3677 start_sequence ();
3678 temp1 = gen_reg_rtx (SImode);
3679 temp2 = gen_reg_rtx (SImode);
3680 temp3 = gen_reg_rtx (Pmode);
3681 ret = gen_reg_rtx (Pmode);
3682 o0 = gen_rtx_REG (Pmode, 8);
3683 got = sparc_tls_got ();
3684 emit_insn (gen_tldm_hi22 (temp1));
3685 emit_insn (gen_tldm_lo10 (temp2, temp1));
3686 if (TARGET_ARCH32)
3687 {
3688 emit_insn (gen_tldm_add32 (o0, got, temp2));
3689 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3690 const1_rtx));
3691 }
3692 else
3693 {
3694 emit_insn (gen_tldm_add64 (o0, got, temp2));
3695 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3696 const1_rtx));
3697 }
3698 CALL_INSN_FUNCTION_USAGE (insn)
3699 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3700 CALL_INSN_FUNCTION_USAGE (insn));
3701 insn = get_insns ();
3702 end_sequence ();
3703 emit_libcall_block (insn, temp3, o0,
3704 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3705 UNSPEC_TLSLD_BASE));
3706 temp1 = gen_reg_rtx (SImode);
3707 temp2 = gen_reg_rtx (SImode);
3708 emit_insn (gen_tldo_hix22 (temp1, addr));
3709 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3710 if (TARGET_ARCH32)
3711 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3712 else
3713 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3714 break;
3715
3716 case TLS_MODEL_INITIAL_EXEC:
3717 temp1 = gen_reg_rtx (SImode);
3718 temp2 = gen_reg_rtx (SImode);
3719 temp3 = gen_reg_rtx (Pmode);
3720 got = sparc_tls_got ();
3721 emit_insn (gen_tie_hi22 (temp1, addr));
3722 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3723 if (TARGET_ARCH32)
3724 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3725 else
3726 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3727 if (TARGET_SUN_TLS)
3728 {
3729 ret = gen_reg_rtx (Pmode);
3730 if (TARGET_ARCH32)
3731 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3732 temp3, addr));
3733 else
3734 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3735 temp3, addr));
3736 }
3737 else
3738 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3739 break;
3740
3741 case TLS_MODEL_LOCAL_EXEC:
3742 temp1 = gen_reg_rtx (Pmode);
3743 temp2 = gen_reg_rtx (Pmode);
3744 if (TARGET_ARCH32)
3745 {
3746 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3747 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3748 }
3749 else
3750 {
3751 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3752 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3753 }
3754 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3755 break;
3756
3757 default:
3758 abort ();
3759 }
3760
3761 else
3762 abort (); /* for now ... */
3763
3764 return ret;
3765 }
3766
3767
3768 /* Legitimize PIC addresses. If the address is already position-independent,
3769 we return ORIG. Newly generated position-independent addresses go into a
3770 reg. This is REG if nonzero, otherwise we allocate register(s) as
3771 necessary. */
3772
3773 rtx
3774 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3775 rtx reg)
3776 {
3777 if (GET_CODE (orig) == SYMBOL_REF)
3778 {
3779 rtx pic_ref, address;
3780 rtx insn;
3781
3782 if (reg == 0)
3783 {
3784 if (reload_in_progress || reload_completed)
3785 abort ();
3786 else
3787 reg = gen_reg_rtx (Pmode);
3788 }
3789
3790 if (flag_pic == 2)
3791 {
3792 /* If not during reload, allocate another temp reg here for loading
3793 in the address, so that these instructions can be optimized
3794 properly. */
3795 rtx temp_reg = ((reload_in_progress || reload_completed)
3796 ? reg : gen_reg_rtx (Pmode));
3797
3798 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3799 won't get confused into thinking that these two instructions
3800 are loading in the true address of the symbol. If in the
3801 future a PIC rtx exists, that should be used instead. */
3802 if (Pmode == SImode)
3803 {
3804 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3805 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3806 }
3807 else
3808 {
3809 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3810 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3811 }
3812 address = temp_reg;
3813 }
3814 else
3815 address = orig;
3816
3817 pic_ref = gen_const_mem (Pmode,
3818 gen_rtx_PLUS (Pmode,
3819 pic_offset_table_rtx, address));
3820 current_function_uses_pic_offset_table = 1;
3821 insn = emit_move_insn (reg, pic_ref);
3822 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3823 by the loop optimizer. */
3824 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3825 REG_NOTES (insn));
3826 return reg;
3827 }
3828 else if (GET_CODE (orig) == CONST)
3829 {
3830 rtx base, offset;
3831
3832 if (GET_CODE (XEXP (orig, 0)) == PLUS
3833 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3834 return orig;
3835
3836 if (reg == 0)
3837 {
3838 if (reload_in_progress || reload_completed)
3839 abort ();
3840 else
3841 reg = gen_reg_rtx (Pmode);
3842 }
3843
3844 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3845 {
3846 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3847 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3848 base == reg ? 0 : reg);
3849 }
3850 else
3851 abort ();
3852
3853 if (GET_CODE (offset) == CONST_INT)
3854 {
3855 if (SMALL_INT (offset))
3856 return plus_constant (base, INTVAL (offset));
3857 else if (! reload_in_progress && ! reload_completed)
3858 offset = force_reg (Pmode, offset);
3859 else
3860 /* If we reach here, then something is seriously wrong. */
3861 abort ();
3862 }
3863 return gen_rtx_PLUS (Pmode, base, offset);
3864 }
3865 else if (GET_CODE (orig) == LABEL_REF)
3866 /* ??? Why do we do this? */
3867 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3868 the register is live instead, in case it is eliminated. */
3869 current_function_uses_pic_offset_table = 1;
3870
3871 return orig;
3872 }
3873
3874 /* Try machine-dependent ways of modifying an illegitimate address X
3875 to be legitimate. If we find one, return the new, valid address.
3876
3877 OLDX is the address as it was before break_out_memory_refs was called.
3878 In some cases it is useful to look at this to decide what needs to be done.
3879
3880 MODE is the mode of the operand pointed to by X. */
3881
3882 rtx
3883 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3884 {
3885 rtx orig_x = x;
3886
3887 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3888 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3889 force_operand (XEXP (x, 0), NULL_RTX));
3890 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3891 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3892 force_operand (XEXP (x, 1), NULL_RTX));
3893 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3894 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3895 XEXP (x, 1));
3896 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3897 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3898 force_operand (XEXP (x, 1), NULL_RTX));
3899
3900 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3901 return x;
3902
3903 if (tls_symbolic_operand (x))
3904 x = legitimize_tls_address (x);
3905 else if (flag_pic)
3906 x = legitimize_pic_address (x, mode, 0);
3907 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3908 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3909 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3910 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3911 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3912 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3913 else if (GET_CODE (x) == SYMBOL_REF
3914 || GET_CODE (x) == CONST
3915 || GET_CODE (x) == LABEL_REF)
3916 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3917 return x;
3918 }
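/* Illustrative trace (added commentary, not from the original source):
   given X = (plus (mult (reg R1) (const_int 4)) (reg R2)), the first
   rewrite in legitimize_address above forces the MULT into a fresh
   pseudo T via force_operand and yields (plus (reg R2) (reg T)), a
   reg+reg form accepted by the legitimate_address_p check.  */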
3919
3920 /* Emit the special PIC prologue. */
3921
3922 static void
3923 load_pic_register (void)
3924 {
3925 int orig_flag_pic = flag_pic;
3926
3927 /* If we haven't emitted the special helper function, do so now. */
3928 if (add_pc_to_pic_symbol_name[0] == 0)
3929 {
3930 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3931 int align;
3932
3933 ASM_GENERATE_INTERNAL_LABEL (add_pc_to_pic_symbol_name, "LADDPC", 0);
3934 text_section ();
3935
3936 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3937 if (align > 0)
3938 ASM_OUTPUT_ALIGN (asm_out_file, align);
3939 ASM_OUTPUT_LABEL (asm_out_file, add_pc_to_pic_symbol_name);
3940 if (flag_delayed_branch)
3941 fprintf (asm_out_file, "\tjmp %%o7+8\n\t add\t%%o7, %s, %s\n",
3942 pic_name, pic_name);
3943 else
3944 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp %%o7+8\n\t nop\n",
3945 pic_name, pic_name);
3946 }
3947
3948 /* Initialize every time through, since we can't easily
3949 know this to be permanent. */
3950 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3951 add_pc_to_pic_symbol = gen_rtx_SYMBOL_REF (Pmode, add_pc_to_pic_symbol_name);
3952
3953 flag_pic = 0;
3954 emit_insn (gen_load_pcrel_sym (pic_offset_table_rtx, global_offset_table,
3955 add_pc_to_pic_symbol));
3956 flag_pic = orig_flag_pic;
3957
3958 /* Need to emit this whether or not we obey regdecls,
3959 since setjmp/longjmp can cause life info to screw up.
3960 ??? In the case where we don't obey regdecls, this is not sufficient
3961 since we may not fall out the bottom. */
3962 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3963 }
3964 \f
3965 /* Return 1 if MEM is a MEM rtx known to be aligned to at
3966 least a DESIRED byte boundary. */
3967
3968 int
3969 mem_min_alignment (rtx mem, int desired)
3970 {
3971 rtx addr, base, offset;
3972
3973 /* If it's not a MEM we can't accept it. */
3974 if (GET_CODE (mem) != MEM)
3975 return 0;
3976
3977 addr = XEXP (mem, 0);
3978 base = offset = NULL_RTX;
3979 if (GET_CODE (addr) == PLUS)
3980 {
3981 if (GET_CODE (XEXP (addr, 0)) == REG)
3982 {
3983 base = XEXP (addr, 0);
3984
3985 /* What we are saying here is that if the base
3986 REG is aligned properly, the compiler will make
3987 sure any REG based index upon it will be so
3988 as well. */
3989 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3990 offset = XEXP (addr, 1);
3991 else
3992 offset = const0_rtx;
3993 }
3994 }
3995 else if (GET_CODE (addr) == REG)
3996 {
3997 base = addr;
3998 offset = const0_rtx;
3999 }
4000
4001 if (base != NULL_RTX)
4002 {
4003 int regno = REGNO (base);
4004
4005 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4006 {
4007 /* Check if the compiler has recorded some information
4008 about the alignment of the base REG. If reload has
4009 completed, we already matched with proper alignments.
4010 If global_alloc was not run, however, reload might give
4011 us an unaligned pointer to the local stack. */
4012 if (((cfun != 0
4013 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4014 || (optimize && reload_completed))
4015 && (INTVAL (offset) & (desired - 1)) == 0)
4016 return 1;
4017 }
4018 else
4019 {
4020 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4021 return 1;
4022 }
4023 }
4024 else if (! TARGET_UNALIGNED_DOUBLES
4025 || CONSTANT_P (addr)
4026 || GET_CODE (addr) == LO_SUM)
4027 {
4028 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4029 is true, in which case we can only assume that an access is aligned if
4030 it is to a constant address, or the address involves a LO_SUM. */
4031 return 1;
4032 }
4033
4034 /* An obviously unaligned address. */
4035 return 0;
4036 }
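/* A minimal standalone sketch (not part of the original source) of the
   alignment test used above: for DESIRED a power of two, an offset is
   DESIRED-aligned exactly when its low-order bits are clear.  */
static int
example_offset_aligned_p (long offset, int desired)
{
  /* desired is assumed to be a power of two, as in mem_min_alignment,
     so desired - 1 is a mask of the bits that must be zero.  */
  return (offset & (desired - 1)) == 0;
}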
4037
4038 \f
4039 /* Vectors to keep interesting information about registers where it can easily
4040 be obtained. We used to use the actual mode value as the bit number, but there
4041 are more than 32 modes now. Instead we use two tables: one indexed by
4042 hard register number, and one indexed by mode. */
4043
4044 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4045 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
4046 mapped into one sparc_mode_class mode. */
4047
4048 enum sparc_mode_class {
4049 S_MODE, D_MODE, T_MODE, O_MODE,
4050 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4051 CC_MODE, CCFP_MODE
4052 };
4053
4054 /* Modes for single-word and smaller quantities. */
4055 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4056
4057 /* Modes for double-word and smaller quantities. */
4058 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4059
4060 /* Modes for quad-word and smaller quantities. */
4061 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4062
4063 /* Modes for 8-word and smaller quantities. */
4064 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4065
4066 /* Modes for single-float quantities. We must allow any single word or
4067 smaller quantity. This is because the fix/float conversion instructions
4068 take integer inputs/outputs from the float registers. */
4069 #define SF_MODES (S_MODES)
4070
4071 /* Modes for double-float and smaller quantities. */
4072 #define DF_MODES (S_MODES | D_MODES)
4073
4074 /* Modes for double-float only quantities. */
4075 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4076
4077 /* Modes for quad-float only quantities. */
4078 #define TF_ONLY_MODES (1 << (int) TF_MODE)
4079
4080 /* Modes for quad-float and smaller quantities. */
4081 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
4082
4083 /* Modes for quad-float and double-float quantities. */
4084 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
4085
4086 /* Modes for quad-float pair only quantities. */
4087 #define OF_ONLY_MODES (1 << (int) OF_MODE)
4088
4089 /* Modes for quad-float pairs and smaller quantities. */
4090 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
4091
4092 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
4093
4094 /* Modes for condition codes. */
4095 #define CC_MODES (1 << (int) CC_MODE)
4096 #define CCFP_MODES (1 << (int) CCFP_MODE)
4097
4098 /* Value is 1 if register/mode pair is acceptable on sparc.
4099 The funny mixture of D and T modes is because integer operations
4100 do not specially operate on tetra quantities, so non-quad-aligned
4101 registers can hold quadword quantities (except %o4 and %i4 because
4102 they cross fixed registers). */
4103
4104 /* This points to either the 32 bit or the 64 bit version. */
4105 const int *hard_regno_mode_classes;
4106
4107 static const int hard_32bit_mode_classes[] = {
4108 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4109 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4110 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4111 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4112
4113 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4114 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4115 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4116 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4117
4118 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4119 and none can hold SFmode/SImode values. */
4120 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4121 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4122 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4123 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4124
4125 /* %fcc[0123] */
4126 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4127
4128 /* %icc */
4129 CC_MODES
4130 };
4131
4132 static const int hard_64bit_mode_classes[] = {
4133 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4134 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4135 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4136 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4137
4138 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4139 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4140 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4141 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4142
4143 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4144 and none can hold SFmode/SImode values. */
4145 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4146 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4147 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4148 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4149
4150 /* %fcc[0123] */
4151 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4152
4153 /* %icc */
4154 CC_MODES
4155 };
4156
4157 int sparc_mode_class [NUM_MACHINE_MODES];
4158
4159 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4160
4161 static void
4162 sparc_init_modes (void)
4163 {
4164 int i;
4165
4166 for (i = 0; i < NUM_MACHINE_MODES; i++)
4167 {
4168 switch (GET_MODE_CLASS (i))
4169 {
4170 case MODE_INT:
4171 case MODE_PARTIAL_INT:
4172 case MODE_COMPLEX_INT:
4173 if (GET_MODE_SIZE (i) <= 4)
4174 sparc_mode_class[i] = 1 << (int) S_MODE;
4175 else if (GET_MODE_SIZE (i) == 8)
4176 sparc_mode_class[i] = 1 << (int) D_MODE;
4177 else if (GET_MODE_SIZE (i) == 16)
4178 sparc_mode_class[i] = 1 << (int) T_MODE;
4179 else if (GET_MODE_SIZE (i) == 32)
4180 sparc_mode_class[i] = 1 << (int) O_MODE;
4181 else
4182 sparc_mode_class[i] = 0;
4183 break;
4184 case MODE_FLOAT:
4185 case MODE_COMPLEX_FLOAT:
4186 if (GET_MODE_SIZE (i) <= 4)
4187 sparc_mode_class[i] = 1 << (int) SF_MODE;
4188 else if (GET_MODE_SIZE (i) == 8)
4189 sparc_mode_class[i] = 1 << (int) DF_MODE;
4190 else if (GET_MODE_SIZE (i) == 16)
4191 sparc_mode_class[i] = 1 << (int) TF_MODE;
4192 else if (GET_MODE_SIZE (i) == 32)
4193 sparc_mode_class[i] = 1 << (int) OF_MODE;
4194 else
4195 sparc_mode_class[i] = 0;
4196 break;
4197 case MODE_CC:
4198 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4199 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4200 else
4201 sparc_mode_class[i] = 1 << (int) CC_MODE;
4202 break;
4203 default:
4204 sparc_mode_class[i] = 0;
4205 break;
4206 }
4207 }
4208
4209 if (TARGET_ARCH64)
4210 hard_regno_mode_classes = hard_64bit_mode_classes;
4211 else
4212 hard_regno_mode_classes = hard_32bit_mode_classes;
4213
4214 /* Initialize the array used by REGNO_REG_CLASS. */
4215 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4216 {
4217 if (i < 16 && TARGET_V8PLUS)
4218 sparc_regno_reg_class[i] = I64_REGS;
4219 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4220 sparc_regno_reg_class[i] = GENERAL_REGS;
4221 else if (i < 64)
4222 sparc_regno_reg_class[i] = FP_REGS;
4223 else if (i < 96)
4224 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4225 else if (i < 100)
4226 sparc_regno_reg_class[i] = FPCC_REGS;
4227 else
4228 sparc_regno_reg_class[i] = NO_REGS;
4229 }
4230 }
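/* Sketch (an assumption, not the actual target macro definition) of how
   the two tables built above are meant to combine: a register/mode pair
   is acceptable when the class bit computed for the mode is present in
   the mask recorded for the register.  */
static int
example_hard_regno_mode_ok_p (int regno, int mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[mode]) != 0;
}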
4231 \f
4232 /* Compute the frame size required by the function. This function is called
4233 during the reload pass and also by sparc_expand_prologue. */
4234
4235 HOST_WIDE_INT
4236 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4237 {
4238 int outgoing_args_size = (current_function_outgoing_args_size
4239 + REG_PARM_STACK_SPACE (current_function_decl));
4240 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4241 int i;
4242
4243 if (TARGET_ARCH64)
4244 {
4245 for (i = 0; i < 8; i++)
4246 if (regs_ever_live[i] && ! call_used_regs[i])
4247 n_regs += 2;
4248 }
4249 else
4250 {
4251 for (i = 0; i < 8; i += 2)
4252 if ((regs_ever_live[i] && ! call_used_regs[i])
4253 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4254 n_regs += 2;
4255 }
4256
4257 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4258 if ((regs_ever_live[i] && ! call_used_regs[i])
4259 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4260 n_regs += 2;
4261
4262 /* Set up values for use in prologue and epilogue. */
4263 num_gfregs = n_regs;
4264
4265 if (leaf_function_p
4266 && n_regs == 0
4267 && size == 0
4268 && current_function_outgoing_args_size == 0)
4269 actual_fsize = apparent_fsize = 0;
4270 else
4271 {
4272 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4273 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4274 apparent_fsize += n_regs * 4;
4275 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4276 }
4277
4278 /* Make sure nothing can clobber our register windows.
4279 If a SAVE must be done, or there is a stack-local variable,
4280 the register window area must be allocated.
4281 ??? For v8 we apparently need an additional 8 bytes of reserved space. */
4282 if (! leaf_function_p || size > 0)
4283 actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
4284
4285 return SPARC_STACK_ALIGN (actual_fsize);
4286 }
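/* A minimal standalone sketch (not part of the original source) of the
   8-byte round-up idiom used for APPARENT_FSIZE above.  */
static long
example_round_up_to_8 (long size)
{
  /* Adding 7 and clearing the low three bits rounds SIZE up to the
     next multiple of 8; -8 is ...11111000 in two's complement.  */
  return (size + 7) & -8;
}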
4287
4288 /* Output any necessary .register pseudo-ops. */
4289
4290 void
4291 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4292 {
4293 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4294 int i;
4295
4296 if (TARGET_ARCH32)
4297 return;
4298
4299 /* Check if %g[2367] were used without
4300 .register being printed for them already. */
4301 for (i = 2; i < 8; i++)
4302 {
4303 if (regs_ever_live [i]
4304 && ! sparc_hard_reg_printed [i])
4305 {
4306 sparc_hard_reg_printed [i] = 1;
4307 fprintf (file, "\t.register\t%%g%d, #scratch\n", i);
4308 }
4309 if (i == 3) i = 5;
4310 }
4311 #endif
4312 }
4313
4314 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4315 as needed. LOW should be double-word aligned for 32-bit registers.
4316 Return the new OFFSET. */
4317
4318 #define SORR_SAVE 0
4319 #define SORR_RESTORE 1
4320
4321 static int
4322 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4323 {
4324 rtx mem, insn;
4325 int i;
4326
4327 if (TARGET_ARCH64 && high <= 32)
4328 {
4329 for (i = low; i < high; i++)
4330 {
4331 if (regs_ever_live[i] && ! call_used_regs[i])
4332 {
4333 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4334 set_mem_alias_set (mem, sparc_sr_alias_set);
4335 if (action == SORR_SAVE)
4336 {
4337 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4338 RTX_FRAME_RELATED_P (insn) = 1;
4339 }
4340 else /* action == SORR_RESTORE */
4341 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4342 offset += 8;
4343 }
4344 }
4345 }
4346 else
4347 {
4348 for (i = low; i < high; i += 2)
4349 {
4350 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
4351 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
4352 enum machine_mode mode;
4353 int regno;
4354
4355 if (reg0 && reg1)
4356 {
4357 mode = i < 32 ? DImode : DFmode;
4358 regno = i;
4359 }
4360 else if (reg0)
4361 {
4362 mode = i < 32 ? SImode : SFmode;
4363 regno = i;
4364 }
4365 else if (reg1)
4366 {
4367 mode = i < 32 ? SImode : SFmode;
4368 regno = i + 1;
4369 offset += 4;
4370 }
4371 else
4372 continue;
4373
4374 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4375 set_mem_alias_set (mem, sparc_sr_alias_set);
4376 if (action == SORR_SAVE)
4377 {
4378 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4379 RTX_FRAME_RELATED_P (insn) = 1;
4380 }
4381 else /* action == SORR_RESTORE */
4382 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4383
4384 /* Always preserve double-word alignment. */
4385 offset = (offset + 7) & -8;
4386 }
4387 }
4388
4389 return offset;
4390 }
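/* Illustrative trace (added commentary, not from the original source):
   in the pairing loop above, if only the odd register of a pair is
   live, the slot is entered with a single-word mode, REGNO = i + 1 and
   OFFSET bumped by 4, so the lone register occupies the second word of
   its double-word slot; the final (offset + 7) & -8 then restores the
   8-byte alignment for the next pair.  */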
4391
4392 /* Emit code to save call-saved registers. */
4393
4394 static void
4395 emit_save_regs (void)
4396 {
4397 HOST_WIDE_INT offset;
4398 rtx base;
4399
4400 offset = frame_base_offset - apparent_fsize;
4401
4402 if (offset < -4096 || offset + num_gfregs * 4 > 4096)
4403 {
4404 /* ??? This might be optimized a little as %g1 might already have a
4405 value close enough that a single add insn will do. */
4406 /* ??? Although, all of this is probably only a temporary fix
4407 because if %g1 can hold a function result, then
4408 sparc_expand_epilogue will lose (the result will be
4409 clobbered). */
4410 base = gen_rtx_REG (Pmode, 1);
4411 emit_move_insn (base, GEN_INT (offset));
4412 emit_insn (gen_rtx_SET (VOIDmode,
4413 base,
4414 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4415 offset = 0;
4416 }
4417 else
4418 base = frame_base_reg;
4419
4420 offset = save_or_restore_regs (0, 8, base, offset, SORR_SAVE);
4421 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_SAVE);
4422 }
4423
4424 /* Emit code to restore call-saved registers. */
4425
4426 static void
4427 emit_restore_regs (void)
4428 {
4429 HOST_WIDE_INT offset;
4430 rtx base;
4431
4432 offset = frame_base_offset - apparent_fsize;
4433
4434 if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
4435 {
4436 base = gen_rtx_REG (Pmode, 1);
4437 emit_move_insn (base, GEN_INT (offset));
4438 emit_insn (gen_rtx_SET (VOIDmode,
4439 base,
4440 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4441 offset = 0;
4442 }
4443 else
4444 base = frame_base_reg;
4445
4446 offset = save_or_restore_regs (0, 8, base, offset, SORR_RESTORE);
4447 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_RESTORE);
4448 }
4449
4450 /* Emit an increment for the stack pointer. */
4451
4452 static void
4453 emit_stack_pointer_increment (rtx increment)
4454 {
4455 if (TARGET_ARCH64)
4456 emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4457 else
4458 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4459 }
4460
4461 /* Emit a decrement for the stack pointer. */
4462
4463 static void
4464 emit_stack_pointer_decrement (rtx decrement)
4465 {
4466 if (TARGET_ARCH64)
4467 emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4468 else
4469 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4470 }
4471
4472 /* Expand the function prologue. The prologue is responsible for reserving
4473 storage for the frame, saving the call-saved registers and loading the
4474 PIC register if needed. */
4475
4476 void
4477 sparc_expand_prologue (void)
4478 {
4479 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4480 on the final value of the flag means deferring the prologue/epilogue
4481 expansion until just before the second scheduling pass, which is too
4482 late to emit multiple epilogues or return insns.
4483
4484 Of course we are making the assumption that the value of the flag
4485 will not change between now and its final value. Of the three parts
4486 of the formula, only the last one can reasonably vary. Let's take a
4487 closer look, assuming that the first two are true
4488 (otherwise the last value is effectively silenced).
4489
4490 If only_leaf_regs_used returns false, the global predicate will also
4491 be false so the actual frame size calculated below will be positive.
4492 As a consequence, the save_register_window insn will be emitted in
4493 the instruction stream; now this insn explicitly references %fp
4494 which is not a leaf register so only_leaf_regs_used will always
4495 return false subsequently.
4496
4497 If only_leaf_regs_used returns true, we hope that the subsequent
4498 optimization passes won't cause non-leaf registers to pop up. For
4499 example, the regrename pass has special provisions to not rename to
4500 non-leaf registers in a leaf function. */
4501 sparc_leaf_function_p
4502 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4503
4504 /* Need to use actual_fsize, since we are also allocating
4505 space for our callee (and our own register save area). */
4506 actual_fsize
4507 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4508
4509 /* Advertise that the data calculated just above are now valid. */
4510 sparc_prologue_data_valid_p = true;
4511
4512 if (sparc_leaf_function_p)
4513 {
4514 frame_base_reg = stack_pointer_rtx;
4515 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4516 }
4517 else
4518 {
4519 frame_base_reg = hard_frame_pointer_rtx;
4520 frame_base_offset = SPARC_STACK_BIAS;
4521 }
4522
4523 if (actual_fsize == 0)
4524 /* do nothing. */ ;
4525 else if (sparc_leaf_function_p)
4526 {
4527 if (actual_fsize <= 4096)
4528 emit_stack_pointer_increment (GEN_INT (- actual_fsize));
4529 else if (actual_fsize <= 8192)
4530 {
4531 emit_stack_pointer_increment (GEN_INT (-4096));
4532 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4533 }
4534 else
4535 {
4536 rtx reg = gen_rtx_REG (Pmode, 1);
4537 emit_move_insn (reg, GEN_INT (-actual_fsize));
4538 emit_stack_pointer_increment (reg);
4539 }
4540 }
4541 else
4542 {
4543 if (actual_fsize <= 4096)
4544 emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4545 else if (actual_fsize <= 8192)
4546 {
4547 emit_insn (gen_save_register_window (GEN_INT (-4096)));
4548 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4549 }
4550 else
4551 {
4552 rtx reg = gen_rtx_REG (Pmode, 1);
4553 emit_move_insn (reg, GEN_INT (-actual_fsize));
4554 emit_insn (gen_save_register_window (reg));
4555 }
4556 }
4557
4558 /* Call-saved registers are saved just above the outgoing argument area. */
4559 if (num_gfregs)
4560 emit_save_regs ();
4561
4562 /* Load the PIC register if needed. */
4563 if (flag_pic && current_function_uses_pic_offset_table)
4564 load_pic_register ();
4565 }
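/* A minimal standalone sketch (not part of the original source) of the
   three-way size split above; it exists because the SPARC add/save
   immediate is a 13-bit signed field covering [-4096, 4095].  */
static const char *
example_stack_adjust_strategy (long fsize)
{
  if (fsize <= 4096)
    return "single immediate adjustment";   /* -fsize fits in simm13 */
  else if (fsize <= 8192)
    return "two immediate adjustments";     /* -4096, then 4096 - fsize */
  else
    return "constant built in a register";  /* %g1 holds -fsize */
}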
4566
4567 /* This function generates the assembly code for function entry, which boils
4568 down to emitting the necessary .register directives. It also informs the
4569 DWARF-2 back-end on the layout of the frame.
4570
4571 ??? Historical cruft: "On SPARC, move-double insns between fpu and cpu need
4572 an 8-byte block of memory. If any fpu reg is used in the function, we
4573 allocate such a block here, at the bottom of the frame, just in case it's
4574 needed." Could this explain the -8 in emit_restore_regs? */
4575
4576 static void
4577 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4578 {
4579 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4580 if (sparc_leaf_function_p != current_function_uses_only_leaf_regs)
4581 abort ();
4582
4583 sparc_output_scratch_registers (file);
4584
4585 if (dwarf2out_do_frame () && actual_fsize)
4586 {
4587 char *label = dwarf2out_cfi_label ();
4588
4589 /* The canonical frame address refers to the top of the frame. */
4590 dwarf2out_def_cfa (label,
4591 sparc_leaf_function_p
4592 ? STACK_POINTER_REGNUM
4593 : HARD_FRAME_POINTER_REGNUM,
4594 frame_base_offset);
4595
4596 if (! sparc_leaf_function_p)
4597 {
4598 /* Note the register window save. This tells the unwinder that
4599 it needs to restore the window registers from the previous
4600 frame's window save area at 0(cfa). */
4601 dwarf2out_window_save (label);
4602
4603 /* The return address (-8) is now in %i7. */
4604 dwarf2out_return_reg (label, 31);
4605 }
4606 }
4607 }
4608
4609 /* Expand the function epilogue, either normal or part of a sibcall.
4610 We emit all the instructions except the return or the call. */
4611
4612 void
4613 sparc_expand_epilogue (void)
4614 {
4615 if (num_gfregs)
4616 emit_restore_regs ();
4617
4618 if (actual_fsize == 0)
4619 /* do nothing. */ ;
4620 else if (sparc_leaf_function_p)
4621 {
4622 if (actual_fsize <= 4096)
4623 emit_stack_pointer_decrement (GEN_INT (- actual_fsize));
4624 else if (actual_fsize <= 8192)
4625 {
4626 emit_stack_pointer_decrement (GEN_INT (-4096));
4627 emit_stack_pointer_decrement (GEN_INT (4096 - actual_fsize));
4628 }
4629 else
4630 {
4631 rtx reg = gen_rtx_REG (Pmode, 1);
4632 emit_move_insn (reg, GEN_INT (-actual_fsize));
4633 emit_stack_pointer_decrement (reg);
4634 }
4635 }
4636 }
4637
4638 /* Return true if it is appropriate to emit `return' instructions in the
4639 body of a function. */
4640
4641 bool
4642 sparc_can_use_return_insn_p (void)
4643 {
4644 return sparc_prologue_data_valid_p
4645 && (actual_fsize == 0 || !sparc_leaf_function_p);
4646 }
4647
4648 /* This function generates the assembly code for function exit. */
4649
4650 static void
4651 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4652 {
4653 /* If code does not drop into the epilogue, we still have to output
4654 a dummy nop for the sake of sane backtraces. Otherwise, if the
4655 last two instructions of a function were "call foo; dslot;" this
4656 can make the return PC of foo (i.e. address of call instruction
4657 plus 8) point to the first instruction in the next function. */
4658
4659 rtx insn, last_real_insn;
4660
4661 insn = get_last_insn ();
4662
4663 last_real_insn = prev_real_insn (insn);
4664 if (last_real_insn
4665 && GET_CODE (last_real_insn) == INSN
4666 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4667 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4668
4669 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4670 fputs("\tnop\n", file);
4671
4672 sparc_output_deferred_case_vectors ();
4673 }
4674
4675 /* Output a 'restore' instruction. */
4676
4677 static void
4678 output_restore (rtx pat)
4679 {
4680 rtx operands[3];
4681
4682 if (! pat)
4683 {
4684 fputs ("\t restore\n", asm_out_file);
4685 return;
4686 }
4687
4688 if (GET_CODE (pat) != SET)
4689 abort ();
4690
4691 operands[0] = SET_DEST (pat);
4692 pat = SET_SRC (pat);
4693
4694 switch (GET_CODE (pat))
4695 {
4696 case PLUS:
4697 operands[1] = XEXP (pat, 0);
4698 operands[2] = XEXP (pat, 1);
4699 output_asm_insn (" restore %r1, %2, %Y0", operands);
4700 break;
4701 case LO_SUM:
4702 operands[1] = XEXP (pat, 0);
4703 operands[2] = XEXP (pat, 1);
4704 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4705 break;
4706 case ASHIFT:
4707 operands[1] = XEXP (pat, 0);
4708 if (XEXP (pat, 1) != const1_rtx)
4709 abort ();
4710 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4711 break;
4712 default:
4713 operands[1] = pat;
4714 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4715 break;
4716 }
4717 }
4718
4719 /* Output a return. */
4720
4721 const char *
4722 output_return (rtx insn)
4723 {
4724 if (sparc_leaf_function_p)
4725 {
4726 /* This is a leaf function so we don't have to bother restoring the
4727 register window, which frees us from dealing with the convoluted
4728 semantics of restore/return. We simply output the jump to the
4729 return address and the insn in the delay slot (if any). */
4730
4731 if (current_function_calls_eh_return)
4732 abort ();
4733
4734 return "jmp\t%%o7+%)%#";
4735 }
4736 else
4737 {
4738 /* This is a regular function so we have to restore the register window.
4739 We may have a pending insn for the delay slot, which will be either
4740 combined with the 'restore' instruction or put in the delay slot of
4741 the 'return' instruction. */
4742
4743 if (current_function_calls_eh_return)
4744 {
4745 /* If the function uses __builtin_eh_return, the eh_return
4746 machinery occupies the delay slot. */
4747 if (final_sequence)
4748 abort ();
4749
4750 if (! flag_delayed_branch)
4751 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4752
4753 if (TARGET_V9)
4754 fputs ("\treturn\t%i7+8\n", asm_out_file);
4755 else
4756 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4757
4758 if (flag_delayed_branch)
4759 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4760 else
4761 fputs ("\t nop\n", asm_out_file);
4762 }
4763 else if (final_sequence)
4764 {
4765 rtx delay, pat;
4766
4767 delay = NEXT_INSN (insn);
4768 if (! delay)
4769 abort ();
4770
4771 pat = PATTERN (delay);
4772
4773 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4774 {
4775 epilogue_renumber (&pat, 0);
4776 return "return\t%%i7+%)%#";
4777 }
4778 else
4779 {
4780 output_asm_insn ("jmp\t%%i7+%)", NULL);
4781 output_restore (pat);
4782 PATTERN (delay) = gen_blockage ();
4783 INSN_CODE (delay) = -1;
4784 }
4785 }
4786 else
4787 {
4788 /* The delay slot is empty. */
4789 if (TARGET_V9)
4790 return "return\t%%i7+%)\n\t nop";
4791 else if (flag_delayed_branch)
4792 return "jmp\t%%i7+%)\n\t restore";
4793 else
4794 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4795 }
4796 }
4797
4798 return "";
4799 }
4800
4801 /* Output a sibling call. */
4802
4803 const char *
4804 output_sibcall (rtx insn, rtx call_operand)
4805 {
4806 rtx operands[1];
4807
4808 if (! flag_delayed_branch)
4809 abort ();
4810
4811 operands[0] = call_operand;
4812
4813 if (sparc_leaf_function_p)
4814 {
4815 /* This is a leaf function so we don't have to bother restoring the
4816 register window. We simply output the jump to the function and
4817 the insn in the delay slot (if any). */
4818
4819 if (LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence)
4820 abort ();
4821
4822 if (final_sequence)
4823 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4824 operands);
4825 else
4826 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4827 it into a branch if possible. */
4828 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4829 operands);
4830 }
4831 else
4832 {
4833 /* This is a regular function so we have to restore the register window.
4834 We may have a pending insn for the delay slot, which will be combined
4835 with the 'restore' instruction. */
4836
4837 output_asm_insn ("call\t%a0, 0", operands);
4838
4839 if (final_sequence)
4840 {
4841 rtx delay = NEXT_INSN (insn);
4842 if (! delay)
4843 abort ();
4844
4845 output_restore (PATTERN (delay));
4846
4847 PATTERN (delay) = gen_blockage ();
4848 INSN_CODE (delay) = -1;
4849 }
4850 else
4851 output_restore (NULL_RTX);
4852 }
4853
4854 return "";
4855 }
4856 \f
4857 /* Functions for handling argument passing.
4858
4859 For 32-bit, the first 6 args are normally in registers and the rest are
4860 pushed. Any arg that starts within the first 6 words is at least
4861 partially passed in a register unless its data type forbids it.
4862
4863 For 64-bit, the argument registers are laid out as an array of 16 elements
4864 and arguments are added sequentially. The first 6 int args and up to the
4865 first 16 fp args (depending on size) are passed in regs.
4866
4867 Slot Stack Integral Float Float in structure Double Long Double
4868 ---- ----- -------- ----- ------------------ ------ -----------
4869 15 [SP+248] %f31 %f30,%f31 %d30
4870 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4871 13 [SP+232] %f27 %f26,%f27 %d26
4872 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4873 11 [SP+216] %f23 %f22,%f23 %d22
4874 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4875 9 [SP+200] %f19 %f18,%f19 %d18
4876 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4877 7 [SP+184] %f15 %f14,%f15 %d14
4878 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4879 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4880 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4881 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4882 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4883 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4884 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4885
4886 Here SP = %sp with -mno-stack-bias, and %sp+stack_bias otherwise.
4887
4888 Integral arguments are always passed as 64-bit quantities appropriately
4889 extended.
4890
4891 Passing of floating point values is handled as follows.
4892 If a prototype is in scope:
4893 If the value is a named argument (i.e. part of the fixed parameter list,
4894 not matching the `...') then the value is passed in the appropriate
4895 fp reg.
4896 If the value is part of the `...' and is passed in one of the first 6
4897 slots then the value is passed in the appropriate int reg.
4898 If the value is part of the `...' and is not passed in one of the first 6
4899 slots then the value is passed in memory.
4900 If a prototype is not in scope:
4901 If the value is one of the first 6 arguments the value is passed in the
4902 appropriate integer reg and the appropriate fp reg.
4903 If the value is not one of the first 6 arguments the value is passed in
4904 the appropriate fp reg and in memory.
4905
4906
4907 Summary of the calling conventions implemented by GCC on SPARC:
4908
4909 32-bit ABI:
4910 size argument return value
4911
4912 small integer <4 int. reg. int. reg.
4913 word 4 int. reg. int. reg.
4914 double word 8 int. reg. int. reg.
4915
4916 _Complex small integer <8 int. reg. int. reg.
4917 _Complex word 8 int. reg. int. reg.
4918 _Complex double word 16 memory int. reg.
4919
4920 vector integer <=8 int. reg. FP reg.
4921 vector integer >8 memory memory
4922
4923 float 4 int. reg. FP reg.
4924 double 8 int. reg. FP reg.
4925 long double 16 memory memory
4926
4927 _Complex float 8 memory FP reg.
4928 _Complex double 16 memory FP reg.
4929 _Complex long double 32 memory FP reg.
4930
4931 vector float <=32 memory FP reg.
4932 vector float >32 memory memory
4933
4934 aggregate any memory memory
4935
4936
4937
4938 64-bit ABI:
4939 size argument return value
4940
4941 small integer <8 int. reg. int. reg.
4942 word 8 int. reg. int. reg.
4943 double word 16 int. reg. int. reg.
4944
4945 _Complex small integer <16 int. reg. int. reg.
4946 _Complex word 16 int. reg. int. reg.
4947 _Complex double word 32 memory int. reg.
4948
4949 vector integer <=16 FP reg. FP reg.
4950 vector integer 16<s<=32 memory FP reg.
4951 vector integer >32 memory memory
4952
4953 float 4 FP reg. FP reg.
4954 double 8 FP reg. FP reg.
4955 long double 16 FP reg. FP reg.
4956
4957 _Complex float 8 FP reg. FP reg.
4958 _Complex double 16 FP reg. FP reg.
4959 _Complex long double 32 memory FP reg.
4960
4961 vector float <=16 FP reg. FP reg.
4962 vector float 16<s<=32 memory FP reg.
4963 vector float >32 memory memory
4964
4965 aggregate <=16 reg. reg.
4966 aggregate 16<s<=32 memory reg.
4967 aggregate >32 memory memory
4968
4969
4970
4971 Note #1: complex floating-point types follow the extended SPARC ABIs as
4972 implemented by the Sun compiler.
4973
4974 Note #2: integral vector types follow the scalar floating-point types
4975 conventions to match what is implemented by the Sun VIS SDK.
4976
4977 Note #3: floating-point vector types follow the complex floating-point
4978 types conventions. */
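/* Worked example (added commentary): for a prototyped 64-bit call
   f (int a, double b, float c), the layout table above gives slot 0 in
   %o0, slot 1 in %d2 and slot 2 in %f5 -- the float lands in the odd
   (right-justified) half of its 8-byte slot.  */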
4979
4980
4981 /* Maximum number of int regs for args. */
4982 #define SPARC_INT_ARG_MAX 6
4983 /* Maximum number of fp regs for args. */
4984 #define SPARC_FP_ARG_MAX 16
4985
4986 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
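/* For example, with UNITS_PER_WORD == 8 (64-bit), ROUND_ADVANCE (13)
   is (13 + 7) / 8 == 2 slots: the usual ceil (SIZE / UNITS_PER_WORD)
   round-up division idiom.  */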
4987
4988 /* Handle the INIT_CUMULATIVE_ARGS macro.
4989 Initialize a variable CUM of type CUMULATIVE_ARGS
4990 for a call to a function whose data type is FNTYPE.
4991 For a library call, FNTYPE is 0. */
4992
4993 void
4994 init_cumulative_args (struct sparc_args *cum, tree fntype,
4995 rtx libname ATTRIBUTE_UNUSED,
4996 tree fndecl ATTRIBUTE_UNUSED)
4997 {
4998 cum->words = 0;
4999 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
5000 cum->libcall_p = fntype == 0;
5001 }
5002
5003 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
5004 When a prototype says `char' or `short', really pass an `int'. */
5005
5006 static bool
5007 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
5008 {
5009 return TARGET_ARCH32 ? true : false;
5010 }
5011
5012 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5013
5014 static bool
5015 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
5016 {
5017 return TARGET_ARCH64 ? true : false;
5018 }
5019
5020 /* Scan the record type TYPE and return the following predicates:
5021 - INTREGS_P: the record contains at least one field or sub-field
5022 that is eligible for promotion in integer registers.
5023 - FPREGS_P: the record contains at least one field or sub-field
5024 that is eligible for promotion in floating-point registers.
5025 - PACKED_P: the record contains at least one field that is packed.
5026
5027 Sub-fields are not taken into account for the PACKED_P predicate. */
5028
5029 static void
5030 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
5031 {
5032 tree field;
5033
5034 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5035 {
5036 if (TREE_CODE (field) == FIELD_DECL)
5037 {
5038 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5039 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5040 else if (FLOAT_TYPE_P (TREE_TYPE (field)) && TARGET_FPU)
5041 *fpregs_p = 1;
5042 else
5043 *intregs_p = 1;
5044
5045 if (packed_p && DECL_PACKED (field))
5046 *packed_p = 1;
5047 }
5048 }
5049 }
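/* Illustrative example (added commentary): for
   struct { double d; int i; }, the scan above sets *FPREGS_P (the
   double is eligible for FP registers when TARGET_FPU) as well as
   *INTREGS_P (the int), and leaves *PACKED_P untouched since no
   field is packed.  */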
5050
5051 /* Compute the slot number to pass an argument in.
5052 Return the slot number or -1 if passing on the stack.
5053
5054 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5055 the preceding args and about the function being called.
5056 MODE is the argument's machine mode.
5057 TYPE is the data type of the argument (as a tree).
5058 This is null for libcalls where that information may
5059 not be available.
5060 NAMED is nonzero if this argument is a named parameter
5061 (otherwise it is an extra parameter matching an ellipsis).
5062 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5063 *PREGNO records the register number to use if scalar type.
5064 *PPADDING records the amount of padding needed in words. */
5065
5066 static int
5067 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5068 tree type, int named, int incoming_p,
5069 int *pregno, int *ppadding)
5070 {
5071 int regbase = (incoming_p
5072 ? SPARC_INCOMING_INT_ARG_FIRST
5073 : SPARC_OUTGOING_INT_ARG_FIRST);
5074 int slotno = cum->words;
5075 int regno;
5076
5077 *ppadding = 0;
5078
5079 if (type && TREE_ADDRESSABLE (type))
5080 return -1;
5081
5082 if (TARGET_ARCH32
5083 && mode == BLKmode
5084 && type
5085 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5086 return -1;
5087
5088 /* For SPARC64, objects requiring 16-byte alignment get it. */
5089 if (TARGET_ARCH64
5090 && GET_MODE_ALIGNMENT (mode) >= 2 * BITS_PER_WORD
5091 && (slotno & 1) != 0)
5092 slotno++, *ppadding = 1;
5093
5094 switch (GET_MODE_CLASS (mode))
5095 {
5096 case MODE_FLOAT:
5097 case MODE_COMPLEX_FLOAT:
5098 case MODE_VECTOR_INT:
5099 case MODE_VECTOR_FLOAT:
5100 if (TARGET_ARCH64 && TARGET_FPU && named)
5101 {
5102 if (slotno >= SPARC_FP_ARG_MAX)
5103 return -1;
5104 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5105 /* Arguments filling only a single FP register are
5106 right-justified in the outer double FP register. */
5107 if (GET_MODE_SIZE (mode) <= 4)
5108 regno++;
5109 break;
5110 }
5111 /* fallthrough */
5112
5113 case MODE_INT:
5114 case MODE_COMPLEX_INT:
5115 if (slotno >= SPARC_INT_ARG_MAX)
5116 return -1;
5117 regno = regbase + slotno;
5118 break;
5119
5120 case MODE_RANDOM:
5121 if (mode == VOIDmode)
5122 /* MODE is VOIDmode when generating the actual call. */
5123 return -1;
5124
5125 if (mode != BLKmode)
5126 abort ();
5127
5128 /* For SPARC64, objects requiring 16-byte alignment get it. */
5129 if (TARGET_ARCH64
5130 && type
5131 && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
5132 && (slotno & 1) != 0)
5133 slotno++, *ppadding = 1;
5134
5135 if (TARGET_ARCH32 || (type && TREE_CODE (type) == UNION_TYPE))
5136 {
5137 if (slotno >= SPARC_INT_ARG_MAX)
5138 return -1;
5139 regno = regbase + slotno;
5140 }
5141 else /* TARGET_ARCH64 && type && TREE_CODE (type) == RECORD_TYPE */
5142 {
5143 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5144
5145 /* First see what kinds of registers we would need. */
5146 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5147
5148 /* The ABI obviously doesn't specify how packed structures
5149 are passed. These are defined to be passed in int regs
5150 if possible, otherwise memory. */
5151 if (packed_p || !named)
5152 fpregs_p = 0, intregs_p = 1;
5153
5154 /* If all arg slots are filled, then must pass on stack. */
5155 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5156 return -1;
5157
5158 /* If there are only int args and all int arg slots are filled,
5159 then must pass on stack. */
5160 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5161 return -1;
5162
5163 /* Note that even if all int arg slots are filled, fp members may
5164 still be passed in regs if such regs are available.
5165 *PREGNO isn't set because there may be more than one, it's up
5166 to the caller to compute them. */
5167 return slotno;
5168 }
5169 break;
5170
5171 default:
5172 abort ();
5173 }
5174
5175 *pregno = regno;
5176 return slotno;
5177 }
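/* Worked example (added commentary): on 64-bit, a named double in slot
   3 takes the MODE_FLOAT path above and gets REGNO = SPARC_FP_ARG_FIRST
   + 3 * 2, i.e. %d6 in the layout table further up; a fourth integer
   argument instead gets REGNO = REGBASE + 3.  */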
5178
5179 /* Handle recursive register counting for structure field layout. */
5180
5181 struct function_arg_record_value_parms
5182 {
5183 rtx ret; /* return expression being built. */
5184 int slotno; /* slot number of the argument. */
5185 int named; /* whether the argument is named. */
5186 int regbase; /* regno of the base register. */
5187 int stack; /* 1 if part of the argument is on the stack. */
5188 int intoffset; /* offset of the first pending integer field. */
5189 unsigned int nregs; /* number of words passed in registers. */
5190 };
5191
5192 static void function_arg_record_value_3
5193 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5194 static void function_arg_record_value_2
5195 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5196 static void function_arg_record_value_1
5197 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5198 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
5199 static rtx function_arg_union_value (int, enum machine_mode, int);
5200
5201 /* A subroutine of function_arg_record_value. Traverse the structure
5202 recursively and determine how many registers will be required. */
5203
5204 static void
5205 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
5206 struct function_arg_record_value_parms *parms,
5207 bool packed_p)
5208 {
5209 tree field;
5210
5211 /* We need to compute how many registers are needed so we can
5212 allocate the PARALLEL but before we can do that we need to know
5213 whether there are any packed fields. The ABI obviously doesn't
5214 specify how structures are passed in this case, so they are
5215 defined to be passed in int regs if possible, otherwise memory,
5216 regardless of whether there are fp values present. */
5217
5218 if (! packed_p)
5219 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5220 {
5221 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5222 {
5223 packed_p = true;
5224 break;
5225 }
5226 }
5227
5228 /* Compute how many registers we need. */
5229 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5230 {
5231 if (TREE_CODE (field) == FIELD_DECL)
5232 {
5233 HOST_WIDE_INT bitpos = startbitpos;
5234
5235 if (DECL_SIZE (field) != 0
5236 && host_integerp (bit_position (field), 1))
5237 bitpos += int_bit_position (field);
5238
5239 /* ??? FIXME: else assume zero offset. */
5240
5241 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5242 function_arg_record_value_1 (TREE_TYPE (field),
5243 bitpos,
5244 parms,
5245 packed_p);
5246 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5247 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5248 && TARGET_FPU
5249 && parms->named
5250 && ! packed_p)
5251 {
5252 if (parms->intoffset != -1)
5253 {
5254 unsigned int startbit, endbit;
5255 int intslots, this_slotno;
5256
5257 startbit = parms->intoffset & -BITS_PER_WORD;
5258 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5259
5260 intslots = (endbit - startbit) / BITS_PER_WORD;
5261 this_slotno = parms->slotno + parms->intoffset
5262 / BITS_PER_WORD;
5263
5264 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5265 {
5266 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5267 /* We need to pass this field on the stack. */
5268 parms->stack = 1;
5269 }
5270
5271 parms->nregs += intslots;
5272 parms->intoffset = -1;
5273 }
5274
5275 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5276 If it wasn't true we wouldn't be here. */
5277 parms->nregs += 1;
5278 if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5279 parms->nregs += 1;
5280 }
5281 else
5282 {
5283 if (parms->intoffset == -1)
5284 parms->intoffset = bitpos;
5285 }
5286 }
5287 }
5288 }
5289
5290 /* A subroutine of function_arg_record_value. Assign the bits of the
5291 structure between parms->intoffset and bitpos to integer registers. */
5292
5293 static void
5294 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5295 struct function_arg_record_value_parms *parms)
5296 {
5297 enum machine_mode mode;
5298 unsigned int regno;
5299 unsigned int startbit, endbit;
5300 int this_slotno, intslots, intoffset;
5301 rtx reg;
5302
5303 if (parms->intoffset == -1)
5304 return;
5305
5306 intoffset = parms->intoffset;
5307 parms->intoffset = -1;
5308
5309 startbit = intoffset & -BITS_PER_WORD;
5310 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5311 intslots = (endbit - startbit) / BITS_PER_WORD;
5312 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5313
5314 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5315 if (intslots <= 0)
5316 return;
5317
5318 /* If this is the trailing part of a word, only load that much into
5319 the register. Otherwise load the whole register. Note that in
5320 the latter case we may pick up unwanted bits. It's not a problem
5321 at the moment, but we may wish to revisit it. */
5322
5323 if (intoffset % BITS_PER_WORD != 0)
5324 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5325 MODE_INT, 0);
5326 else
5327 mode = word_mode;
5328
5329 intoffset /= BITS_PER_UNIT;
5330 do
5331 {
5332 regno = parms->regbase + this_slotno;
5333 reg = gen_rtx_REG (mode, regno);
5334 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5335 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5336
5337 this_slotno += 1;
5338 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5339 mode = word_mode;
5340 parms->nregs += 1;
5341 intslots -= 1;
5342 }
5343 while (intslots > 0);
5344 }
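/* A minimal standalone sketch (not part of the original source) of the
   bit rounding used above, assuming the word size is a power of two:
   ANDing with -BITS_PER_WORD rounds down to a word boundary; adding
   BITS_PER_WORD - 1 first rounds up instead.  */
static void
example_word_rounding (int bitpos, int bits_per_word, int *down, int *up)
{
  *down = bitpos & -bits_per_word;
  *up = (bitpos + bits_per_word - 1) & -bits_per_word;
}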
5345
5346 /* A subroutine of function_arg_record_value. Traverse the structure
5347 recursively and assign bits to floating point registers. Track which
5348 bits in between need integer registers; invoke function_arg_record_value_3
5349 to make that happen. */
5350
5351 static void
5352 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
5353 struct function_arg_record_value_parms *parms,
5354 bool packed_p)
5355 {
5356 tree field;
5357
5358 if (! packed_p)
5359 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5360 {
5361 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5362 {
5363 packed_p = true;
5364 break;
5365 }
5366 }
5367
5368 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5369 {
5370 if (TREE_CODE (field) == FIELD_DECL)
5371 {
5372 HOST_WIDE_INT bitpos = startbitpos;
5373
5374 if (DECL_SIZE (field) != 0
5375 && host_integerp (bit_position (field), 1))
5376 bitpos += int_bit_position (field);
5377
5378 /* ??? FIXME: else assume zero offset. */
5379
5380 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5381 function_arg_record_value_2 (TREE_TYPE (field),
5382 bitpos,
5383 parms,
5384 packed_p);
5385 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5386 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5387 && TARGET_FPU
5388 && parms->named
5389 && ! packed_p)
5390 {
5391 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5392 int regno;
5393 enum machine_mode mode = DECL_MODE (field);
5394 rtx reg;
5395
5396 function_arg_record_value_3 (bitpos, parms);
5397 switch (mode)
5398 {
5399 case SCmode: mode = SFmode; break;
5400 case DCmode: mode = DFmode; break;
5401 case TCmode: mode = TFmode; break;
5402 default: break;
5403 }
5404 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5405 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5406 regno++;
5407 reg = gen_rtx_REG (mode, regno);
5408 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5409 = gen_rtx_EXPR_LIST (VOIDmode, reg,
5410 GEN_INT (bitpos / BITS_PER_UNIT));
5411 parms->nregs += 1;
5412 if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5413 {
5414 regno += GET_MODE_SIZE (mode) / 4;
5415 reg = gen_rtx_REG (mode, regno);
5416 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5417 = gen_rtx_EXPR_LIST (VOIDmode, reg,
5418 GEN_INT ((bitpos + GET_MODE_BITSIZE (mode))
5419 / BITS_PER_UNIT));
5420 parms->nregs += 1;
5421 }
5422 }
5423 else
5424 {
5425 if (parms->intoffset == -1)
5426 parms->intoffset = bitpos;
5427 }
5428 }
5429 }
5430 }
5431
5432 /* Used by function_arg and function_value to implement the complex
5433 conventions of the 64-bit ABI for passing and returning structures.
5434 Return an expression valid as a return value for the two macros
5435 FUNCTION_ARG and FUNCTION_VALUE.
5436
5437 TYPE is the data type of the argument (as a tree).
5438 This is null for libcalls where that information may
5439 not be available.
5440 MODE is the argument's machine mode.
5441 SLOTNO is the index number of the argument's slot in the parameter array.
5442 NAMED is nonzero if this argument is a named parameter
5443 (otherwise it is an extra parameter matching an ellipsis).
5444 REGBASE is the regno of the base register for the parameter array. */
5445
5446 static rtx
5447 function_arg_record_value (tree type, enum machine_mode mode,
5448 int slotno, int named, int regbase)
5449 {
5450 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5451 struct function_arg_record_value_parms parms;
5452 unsigned int nregs;
5453
5454 parms.ret = NULL_RTX;
5455 parms.slotno = slotno;
5456 parms.named = named;
5457 parms.regbase = regbase;
5458 parms.stack = 0;
5459
5460 /* Compute how many registers we need. */
5461 parms.nregs = 0;
5462 parms.intoffset = 0;
5463 function_arg_record_value_1 (type, 0, &parms, false);
5464
5465 /* Take into account pending integer fields. */
5466 if (parms.intoffset != -1)
5467 {
5468 unsigned int startbit, endbit;
5469 int intslots, this_slotno;
5470
5471 startbit = parms.intoffset & -BITS_PER_WORD;
5472 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5473 intslots = (endbit - startbit) / BITS_PER_WORD;
5474 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5475
5476 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5477 {
5478 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5479 /* We need to pass this field on the stack. */
5480 parms.stack = 1;
5481 }
5482
5483 parms.nregs += intslots;
5484 }
5485 nregs = parms.nregs;
5486
5487 /* Allocate the vector and handle some annoying special cases. */
5488 if (nregs == 0)
5489 {
5490 /* ??? Empty structure has no value? Duh? */
5491 if (typesize <= 0)
5492 {
5493 /* Though there's nothing really to store, return a word register
5494 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5495 leads to breakage because there are zero bytes to load. */
5497 return gen_rtx_REG (mode, regbase);
5498 }
5499 else
5500 {
5501 /* ??? C++ has structures with no fields, and yet a size. Give up
5502 for now and pass everything back in integer registers. */
5503 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5504 }
5505 if (nregs + slotno > SPARC_INT_ARG_MAX)
5506 nregs = SPARC_INT_ARG_MAX - slotno;
5507 }
5508 if (nregs == 0)
5509 abort ();
5510
5511 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5512
5513 /* If at least one field must be passed on the stack, generate
5514 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5515 also be passed on the stack. We can't do much better because the
5516 semantics of FUNCTION_ARG_PARTIAL_NREGS doesn't handle the case
5517 of structures for which the fields passed exclusively in registers
5518 are not at the beginning of the structure. */
5519 if (parms.stack)
5520 XVECEXP (parms.ret, 0, 0)
5521 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5522
5523 /* Fill in the entries. */
5524 parms.nregs = 0;
5525 parms.intoffset = 0;
5526 function_arg_record_value_2 (type, 0, &parms, false);
5527 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5528
5529 if (parms.nregs != nregs)
5530 abort ();
5531
5532 return parms.ret;
5533 }
5534
5535 /* Used by function_arg and function_value to implement the conventions
5536 of the 64-bit ABI for passing and returning unions.
5537 Return an expression valid as a return value for the two macros
5538 FUNCTION_ARG and FUNCTION_VALUE.
5539
5540 SIZE is the size in bytes of the union.
5541 MODE is the argument's machine mode.
5542 REGNO is the hard register the union will be passed in. */
5543
5544 static rtx
5545 function_arg_union_value (int size, enum machine_mode mode, int regno)
5546 {
5547 int nwords = ROUND_ADVANCE (size), i;
5548 rtx regs;
5549
5550 /* Unions are passed left-justified. */
5551 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5552
5553 for (i = 0; i < nwords; i++)
5554 XVECEXP (regs, 0, i)
5555 = gen_rtx_EXPR_LIST (VOIDmode,
5556 gen_rtx_REG (word_mode, regno + i),
5557 GEN_INT (UNITS_PER_WORD * i));
5558
5559 return regs;
5560 }
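/* Illustrative example (added commentary): a 12-byte union starting at
   %o0 on 64-bit yields ROUND_ADVANCE (12) == 2 entries, (%o0, offset 0)
   and (%o1, offset 8) -- left-justified, unlike small scalar FP
   arguments which are right-justified in their slot.  */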
5561
5562 /* Handle the FUNCTION_ARG macro.
5563 Determine where to put an argument to a function.
5564 Value is zero to push the argument on the stack,
5565 or a hard register in which to store the argument.
5566
5567 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5568 the preceding args and about the function being called.
5569 MODE is the argument's machine mode.
5570 TYPE is the data type of the argument (as a tree).
5571 This is null for libcalls where that information may
5572 not be available.
5573 NAMED is nonzero if this argument is a named parameter
5574 (otherwise it is an extra parameter matching an ellipsis).
5575 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5576
5577 rtx
5578 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5579 tree type, int named, int incoming_p)
5580 {
5581 int regbase = (incoming_p
5582 ? SPARC_INCOMING_INT_ARG_FIRST
5583 : SPARC_OUTGOING_INT_ARG_FIRST);
5584 int slotno, regno, padding;
5585 rtx reg;
5586
5587 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5588 &regno, &padding);
5589
5590 if (slotno == -1)
5591 return 0;
5592
5593 if (TARGET_ARCH32)
5594 {
5595 reg = gen_rtx_REG (mode, regno);
5596 return reg;
5597 }
5598
5599 if (type && TREE_CODE (type) == RECORD_TYPE)
5600 {
5601 /* Structures up to 16 bytes in size are passed in arg slots on the
5602 stack and are promoted to registers where possible. */
5603
5604 if (int_size_in_bytes (type) > 16)
5605 abort (); /* shouldn't get here */
5606
5607 return function_arg_record_value (type, mode, slotno, named, regbase);
5608 }
5609 else if (type && TREE_CODE (type) == UNION_TYPE)
5610 {
5611 HOST_WIDE_INT size = int_size_in_bytes (type);
5612
5613 if (size > 16)
5614 abort (); /* shouldn't get here */
5615
5616 return function_arg_union_value (size, mode, regno);
5617 }
5618 /* v9 fp args in reg slots beyond the int reg slots get passed in regs,
5619 but also have the slot allocated for them.
5620 If no prototype is in scope, fp values in register slots get passed
5621 in two places: either fp regs and int regs, or fp regs and memory.  */
5622 else if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5623 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5624 || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
5625 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
5626 && SPARC_FP_REG_P (regno))
5627 {
5628 reg = gen_rtx_REG (mode, regno);
5629 if (cum->prototype_p || cum->libcall_p)
5630 {
5631 /* "* 2" because fp reg numbers are recorded in 4 byte
5632 quantities. */
5633 #if 0
5634 /* ??? This will cause the value to be passed in the fp reg and
5635 in the stack. When a prototype exists we want to pass the
5636 value in the reg but reserve space on the stack. That's an
5637 optimization, and is deferred [for a bit]. */
5638 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5639 return gen_rtx_PARALLEL (mode,
5640 gen_rtvec (2,
5641 gen_rtx_EXPR_LIST (VOIDmode,
5642 NULL_RTX, const0_rtx),
5643 gen_rtx_EXPR_LIST (VOIDmode,
5644 reg, const0_rtx)));
5645 else
5646 #else
5647 /* ??? It seems that passing back a register even when past
5648 the area declared by REG_PARM_STACK_SPACE will allocate
5649 space appropriately, and will not copy the data onto the
5650 stack, exactly as we desire.
5651
5652 This is due to locate_and_pad_parm being called in
5653 expand_call whenever reg_parm_stack_space > 0, which
5654 while beneficial to our example here, would seem to be
5655 in error from what had been intended. Ho hum... -- r~ */
5656 #endif
5657 return reg;
5658 }
5659 else
5660 {
5661 rtx v0, v1;
5662
5663 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5664 {
5665 int intreg;
5666
5667 /* On incoming, we don't need to know that the value
5668 is passed in %f0 and %i0, and it confuses other parts
5669 causing needless spillage even on the simplest cases. */
5670 if (incoming_p)
5671 return reg;
5672
5673 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5674 + (regno - SPARC_FP_ARG_FIRST) / 2);
5675
5676 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5677 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5678 const0_rtx);
5679 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5680 }
5681 else
5682 {
5683 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5684 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5685 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5686 }
5687 }
5688 }
5689 else
5690 {
5691 /* Scalar or complex int. */
5692 reg = gen_rtx_REG (mode, regno);
5693 }
5694
5695 return reg;
5696 }
5697
5698 /* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
5699 For an arg passed partly in registers and partly in memory,
5700 this is the number of registers used.
5701 For args passed entirely in registers or entirely in memory, zero.
5702
5703 Any arg that starts in the first 6 regs but won't entirely fit in them
5704 needs partial registers on v8. On v9, structures with integer
5705 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5706 values that begin in the last fp reg [where "last fp reg" varies with the
5707 mode] will be split between that reg and memory. */
5708
5709 int
5710 function_arg_partial_nregs (const struct sparc_args *cum,
5711 enum machine_mode mode, tree type, int named)
5712 {
5713 int slotno, regno, padding;
5714
5715 /* We pass 0 for incoming_p here; it doesn't matter.  */
5716 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5717
5718 if (slotno == -1)
5719 return 0;
5720
5721 if (TARGET_ARCH32)
5722 {
5723 if ((slotno + (mode == BLKmode
5724 ? ROUND_ADVANCE (int_size_in_bytes (type))
5725 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5726 > SPARC_INT_ARG_MAX)
5727 return SPARC_INT_ARG_MAX - slotno;
5728 }
5729 else
5730 {
5731 /* We are guaranteed by pass_by_reference that the size of the
5732 argument is not greater than 16 bytes, so we only need to
5733 return 1 if the argument is partially passed in registers. */
5734
5735 if (type && AGGREGATE_TYPE_P (type))
5736 {
5737 int size = int_size_in_bytes (type);
5738
5739 if (size > UNITS_PER_WORD
5740 && slotno == SPARC_INT_ARG_MAX - 1)
5741 return 1;
5742 }
5743 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5744 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5745 && ! (TARGET_FPU && named)))
5746 {
5747 /* The complex types are passed as packed types. */
5748 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5749 && slotno == SPARC_INT_ARG_MAX - 1)
5750 return 1;
5751 }
5752 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5753 {
5754 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5755 > SPARC_FP_ARG_MAX)
5756 return 1;
5757 }
5758 }
5759
5760 return 0;
5761 }
5762
5763 /* Return true if the argument should be passed by reference.
5764 !v9: The SPARC ABI stipulates passing struct arguments (of any size) and
5765 quad-precision floats by invisible reference.
5766 v9: Aggregates greater than 16 bytes are passed by reference.
5767 For Pascal, also pass arrays by reference. */
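/* A few illustrative consequences (a sketch, not an exhaustive list):
   on 32-bit, every struct and every quad-precision float (TFmode,
   16 bytes) goes by reference; on 64-bit, a 24-byte struct goes by
   reference while a 16-byte struct is passed by value in registers.  */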
5768
5769 static bool
5770 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5771 enum machine_mode mode, tree type,
5772 bool named ATTRIBUTE_UNUSED)
5773 {
5774 if (TARGET_ARCH32)
5775 {
5776 return ((type && AGGREGATE_TYPE_P (type))
5777 /* Extended ABI (as implemented by the Sun compiler) says
5778 that all complex floats are passed in memory. */
5779 || mode == SCmode
5780 /* Enforce the 2-word cap for passing arguments in registers.
5781 This affects CDImode, TFmode, DCmode, TCmode and large
5782 vector modes. */
5783 || GET_MODE_SIZE (mode) > 8);
5784 }
5785 else
5786 {
5787 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5788 || (type
5789 && AGGREGATE_TYPE_P (type)
5790 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5791 /* Enforce the 2-word cap for passing arguments in registers.
5792 This affects CTImode, TCmode and large vector modes. */
5793 || GET_MODE_SIZE (mode) > 16);
5794 }
5795 }
5796
5797 /* Handle the FUNCTION_ARG_ADVANCE macro.
5798 Update the data in CUM to advance over an argument
5799 of mode MODE and data type TYPE.
5800 TYPE is null for libcalls where that information may not be available. */
5801
5802 void
5803 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5804 tree type, int named)
5805 {
5806 int slotno, regno, padding;
5807
5808 /* We pass 0 for incoming_p here; it doesn't matter.  */
5809 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5810
5811 /* If the register required leading padding, add it.  */
5812 if (slotno != -1)
5813 cum->words += padding;
5814
5815 if (TARGET_ARCH32)
5816 {
5817 cum->words += (mode != BLKmode
5818 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5819 : ROUND_ADVANCE (int_size_in_bytes (type)));
5820 }
5821 else
5822 {
5823 if (type && AGGREGATE_TYPE_P (type))
5824 {
5825 int size = int_size_in_bytes (type);
5826
5827 if (size <= 8)
5828 ++cum->words;
5829 else if (size <= 16)
5830 cum->words += 2;
5831 else /* passed by reference */
5832 ++cum->words;
5833 }
5834 else
5835 {
5836 cum->words += (mode != BLKmode
5837 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5838 : ROUND_ADVANCE (int_size_in_bytes (type)));
5839 }
5840 }
5841 }
5842
5843 /* Handle the FUNCTION_ARG_PADDING macro.
5844 For the 64-bit ABI, structs are always stored left-shifted in their
5845 argument slot.  */
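/* E.g. (illustrative): a 3-byte struct occupies bytes 0..2 of its
   8-byte slot with the padding at the end, whereas scalars fall back
   to the default, which on this big-endian target generally pads
   small values downward.  */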
5846
5847 enum direction
5848 function_arg_padding (enum machine_mode mode, tree type)
5849 {
5850 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5851 return upward;
5852
5853 /* Fall back to the default. */
5854 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5855 }
5856
5857 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5858 Specify whether to return the return value in memory. */
5859
5860 static bool
5861 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5862 {
5863 if (TARGET_ARCH32)
5864 /* Original SPARC 32-bit ABI says that quad-precision floats
5865 and all structures are returned in memory. Extended ABI
5866 (as implemented by the Sun compiler) says that all complex
5867 floats are returned in registers (8 FP registers at most
5868 for '_Complex long double'). Return all complex integers
5869 in registers (4 at most for '_Complex long long'). */
5870 return (TYPE_MODE (type) == BLKmode
5871 || TYPE_MODE (type) == TFmode
5872 /* Integral vector types follow the scalar FP types' conventions.  */
5873 || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_VECTOR_INT
5874 && GET_MODE_SIZE (TYPE_MODE (type)) > 8)
5875 /* FP vector types follow the complex FP types conventions. */
5876 || (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_VECTOR_FLOAT
5877 && GET_MODE_SIZE (TYPE_MODE (type)) > 32));
5878 else
5879 /* Original SPARC 64-bit ABI says that structures and unions
5880 smaller than 32 bytes are returned in registers. Extended
5881 ABI (as implemented by the Sun compiler) says that all complex
5882 floats are returned in registers (8 FP registers at most
5883 for '_Complex long double'). Return all complex integers
5884 in registers (4 at most for '_Complex TItype'). */
5885 return ((TYPE_MODE (type) == BLKmode
5886 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32)
5887 || GET_MODE_SIZE (TYPE_MODE (type)) > 32);
5888 }
5889
5890 /* Handle the TARGET_STRUCT_VALUE target hook.
5891 Return where to find the structure return value address. */
5892
5893 static rtx
5894 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
5895 {
5896 if (TARGET_ARCH64)
5897 return 0;
5898 else
5899 {
5900 if (incoming)
5901 return gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5902 STRUCT_VALUE_OFFSET));
5903 else
5904 return gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5905 STRUCT_VALUE_OFFSET));
5906 }
5907 }
5908
5909 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5910 For v9, function return values are subject to the same rules as arguments,
5911 except that up to 32 bytes may be returned in registers. */
5912
5913 rtx
5914 function_value (tree type, enum machine_mode mode, int incoming_p)
5915 {
5916 /* Beware that the two values are swapped here wrt function_arg. */
5917 int regbase = (incoming_p
5918 ? SPARC_OUTGOING_INT_ARG_FIRST
5919 : SPARC_INCOMING_INT_ARG_FIRST);
5920 int regno;
5921
5922 if (TARGET_ARCH64 && type)
5923 {
5924 if (TREE_CODE (type) == RECORD_TYPE)
5925 {
5926 /* Structures up to 32 bytes in size are passed in registers,
5927 promoted to fp registers where possible. */
5928
5929 if (int_size_in_bytes (type) > 32)
5930 abort (); /* shouldn't get here */
5931
5932 return function_arg_record_value (type, mode, 0, 1, regbase);
5933 }
5934 else if (TREE_CODE (type) == UNION_TYPE)
5935 {
5936 HOST_WIDE_INT size = int_size_in_bytes (type);
5937
5938 if (size > 32)
5939 abort (); /* shouldn't get here */
5940
5941 return function_arg_union_value (size, mode, regbase);
5942 }
5943 else if (AGGREGATE_TYPE_P (type))
5944 {
5945 /* All other aggregate types are passed in an integer register
5946 in a mode corresponding to the size of the type. */
5947 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5948
5949 if (bytes > 32)
5950 abort (); /* shouldn't get here */
5951
5952 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5953
5954 /* ??? We probably should have made the same ABI change in
5955 3.4.0 as the one we made for unions. The latter was
5956 required by the SCD though, while the former is not
5957 specified, so we favored compatibility and efficiency.
5958
5959 Now we're stuck for aggregates larger than 16 bytes,
5960 because OImode vanished in the meantime. Let's not
5961 try to be unduly clever, and simply follow the ABI
5962 for unions in that case. */
5963 if (mode == BLKmode)
5964 return function_arg_union_value (bytes, mode, regbase);
5965 }
5966 else if (GET_MODE_CLASS (mode) == MODE_INT
5967 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5968 mode = word_mode;
5969 }
5970
5971 if (TARGET_FPU && (FLOAT_MODE_P (mode) || VECTOR_MODE_P (mode)))
5972 regno = SPARC_FP_ARG_FIRST;
5973 else
5974 regno = regbase;
5975
5976 return gen_rtx_REG (mode, regno);
5977 }
5978
5979 /* Do what is necessary for `va_start'. We look at the current function
5980 to determine if stdarg or varargs is used and return the address of
5981 the first unnamed parameter. */
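/* In effect (a sketch of the behavior): if the named parameters
   consume FIRST_REG argument slots, the remaining incoming integer
   argument registers are spilled to their reserved stack slots so
   that va_arg can later walk all anonymous arguments in memory.  */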
5982
5983 static rtx
5984 sparc_builtin_saveregs (void)
5985 {
5986 int first_reg = current_function_args_info.words;
5987 rtx address;
5988 int regno;
5989
5990 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5991 emit_move_insn (gen_rtx_MEM (word_mode,
5992 gen_rtx_PLUS (Pmode,
5993 frame_pointer_rtx,
5994 GEN_INT (FIRST_PARM_OFFSET (0)
5995 + (UNITS_PER_WORD
5996 * regno)))),
5997 gen_rtx_REG (word_mode,
5998 SPARC_INCOMING_INT_ARG_FIRST + regno));
5999
6000 address = gen_rtx_PLUS (Pmode,
6001 frame_pointer_rtx,
6002 GEN_INT (FIRST_PARM_OFFSET (0)
6003 + UNITS_PER_WORD * first_reg));
6004
6005 return address;
6006 }
6007
6008 /* Implement `va_start' for stdarg. */
6009
6010 void
6011 sparc_va_start (tree valist, rtx nextarg)
6012 {
6013 nextarg = expand_builtin_saveregs ();
6014 std_expand_builtin_va_start (valist, nextarg);
6015 }
6016
6017 /* Implement `va_arg' for stdarg. */
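/* Illustrative cases on v9 (assuming the rules described above): a
   4-byte int is fetched from the high-order half of its 8-byte slot
   (the big-endian size < rsize adjustment below); an aggregate of at
   most 16 bytes is read left-justified from its slot(s); anything
   bigger was passed by reference, so only the pointer is fetched.  */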
6018
6019 static tree
6020 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6021 {
6022 HOST_WIDE_INT size, rsize, align;
6023 tree addr, incr;
6024 bool indirect;
6025 tree ptrtype = build_pointer_type (type);
6026
6027 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6028 {
6029 indirect = true;
6030 size = rsize = UNITS_PER_WORD;
6031 align = 0;
6032 }
6033 else
6034 {
6035 indirect = false;
6036 size = int_size_in_bytes (type);
6037 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6038 align = 0;
6039
6040 if (TARGET_ARCH64)
6041 {
6042 /* For SPARC64, objects requiring 16-byte alignment get it. */
6043 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6044 align = 2 * UNITS_PER_WORD;
6045
6046 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6047 are left-justified in their slots. */
6048 if (AGGREGATE_TYPE_P (type))
6049 {
6050 if (size == 0)
6051 size = rsize = UNITS_PER_WORD;
6052 else
6053 size = rsize;
6054 }
6055 }
6056 }
6057
6058 incr = valist;
6059 if (align)
6060 {
6061 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
6062 ssize_int (align - 1)));
6063 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
6064 ssize_int (-align)));
6065 }
6066
6067 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6068 addr = incr;
6069
6070 if (BYTES_BIG_ENDIAN && size < rsize)
6071 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
6072 ssize_int (rsize - size)));
6073
6074 if (indirect)
6075 {
6076 addr = fold_convert (build_pointer_type (ptrtype), addr);
6077 addr = build_va_arg_indirect_ref (addr);
6078 }
6079 /* If the address isn't aligned properly for the type,
6080 we may need to copy to a temporary.
6081 FIXME: This is inefficient. Usually we can do this
6082 in registers. */
6083 else if (align == 0
6084 && TYPE_ALIGN (type) > BITS_PER_WORD)
6085 {
6086 tree tmp = create_tmp_var (type, "va_arg_tmp");
6087 tree dest_addr = build_fold_addr_expr (tmp);
6088
6089 tree copy = build_function_call_expr
6090 (implicit_built_in_decls[BUILT_IN_MEMCPY],
6091 tree_cons (NULL_TREE, dest_addr,
6092 tree_cons (NULL_TREE, addr,
6093 tree_cons (NULL_TREE, size_int (rsize),
6094 NULL_TREE))));
6095
6096 gimplify_and_add (copy, pre_p);
6097 addr = dest_addr;
6098 }
6099 else
6100 addr = fold_convert (ptrtype, addr);
6101
6102 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
6103 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
6104 gimplify_and_add (incr, post_p);
6105
6106 return build_va_arg_indirect_ref (addr);
6107 }
6108 \f
6109 /* Return the string to output an unconditional branch to LABEL, which is
6110 the operand number of the label.
6111
6112 DEST is the destination insn (i.e. the label), INSN is the source. */
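/* For instance (illustrative), the v9 form produces the template
   "ba%*,pt\t%%xcc, %l0%(" and the v8 form produces "b%*\t%l0%(";
   the %* and %( sequences are expanded later by print_operand.  */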
6113
6114 const char *
6115 output_ubranch (rtx dest, int label, rtx insn)
6116 {
6117 static char string[64];
6118 bool v9_form = false;
6119 char *p;
6120
6121 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6122 {
6123 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6124 - INSN_ADDRESSES (INSN_UID (insn)));
6125 /* Leave some instructions for "slop". */
6126 if (delta >= -260000 && delta < 260000)
6127 v9_form = true;
6128 }
6129
6130 if (v9_form)
6131 strcpy (string, "ba%*,pt\t%%xcc, ");
6132 else
6133 strcpy (string, "b%*\t");
6134
6135 p = strchr (string, '\0');
6136 *p++ = '%';
6137 *p++ = 'l';
6138 *p++ = '0' + label;
6139 *p++ = '%';
6140 *p++ = '(';
6141 *p = '\0';
6142
6143 return string;
6144 }
6145
6146 /* Return the string to output a conditional branch to LABEL, which is
6147 the operand number of the label. OP is the conditional expression.
6148 XEXP (OP, 0) is assumed to be a condition code register (integer or
6149 floating point) and its mode specifies what kind of comparison we made.
6150
6151 DEST is the destination insn (i.e. the label), INSN is the source.
6152
6153 REVERSED is nonzero if we should reverse the sense of the comparison.
6154
6155 ANNUL is nonzero if we should generate an annulling branch. */
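/* E.g. (illustrative): an annulled EQ branch on %icc predicted taken
   comes back as something like "be,a,pt\t%%icc, %l1%#" for LABEL 1.  */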
6156
6157 const char *
6158 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6159 rtx insn)
6160 {
6161 static char string[64];
6162 enum rtx_code code = GET_CODE (op);
6163 rtx cc_reg = XEXP (op, 0);
6164 enum machine_mode mode = GET_MODE (cc_reg);
6165 const char *labelno, *branch;
6166 int spaces = 8, far;
6167 char *p;
6168
6169 /* v9 branches are limited to +-1MB. If it is too far away,
6170 change
6171
6172 bne,pt %xcc, .LC30
6173
6174 to
6175
6176 be,pn %xcc, .+12
6177 nop
6178 ba .LC30
6179
6180 and
6181
6182 fbne,a,pn %fcc2, .LC29
6183
6184 to
6185
6186 fbe,pt %fcc2, .+16
6187 nop
6188 ba .LC29 */
6189
6190 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6191 if (reversed ^ far)
6192 {
6193 /* Reversal of FP compares takes care -- an ordered compare
6194 becomes an unordered compare and vice versa. */
6195 if (mode == CCFPmode || mode == CCFPEmode)
6196 code = reverse_condition_maybe_unordered (code);
6197 else
6198 code = reverse_condition (code);
6199 }
6200
6201 /* Start by writing the branch condition. */
6202 if (mode == CCFPmode || mode == CCFPEmode)
6203 {
6204 switch (code)
6205 {
6206 case NE:
6207 branch = "fbne";
6208 break;
6209 case EQ:
6210 branch = "fbe";
6211 break;
6212 case GE:
6213 branch = "fbge";
6214 break;
6215 case GT:
6216 branch = "fbg";
6217 break;
6218 case LE:
6219 branch = "fble";
6220 break;
6221 case LT:
6222 branch = "fbl";
6223 break;
6224 case UNORDERED:
6225 branch = "fbu";
6226 break;
6227 case ORDERED:
6228 branch = "fbo";
6229 break;
6230 case UNGT:
6231 branch = "fbug";
6232 break;
6233 case UNLT:
6234 branch = "fbul";
6235 break;
6236 case UNEQ:
6237 branch = "fbue";
6238 break;
6239 case UNGE:
6240 branch = "fbuge";
6241 break;
6242 case UNLE:
6243 branch = "fbule";
6244 break;
6245 case LTGT:
6246 branch = "fblg";
6247 break;
6248
6249 default:
6250 abort ();
6251 }
6252
6253 /* ??? !v9: FP branches cannot be preceded by another floating point
6254 insn. Because there is currently no concept of pre-delay slots,
6255 we can fix this only by always emitting a nop before a floating
6256 point branch. */
6257
6258 string[0] = '\0';
6259 if (! TARGET_V9)
6260 strcpy (string, "nop\n\t");
6261 strcat (string, branch);
6262 }
6263 else
6264 {
6265 switch (code)
6266 {
6267 case NE:
6268 branch = "bne";
6269 break;
6270 case EQ:
6271 branch = "be";
6272 break;
6273 case GE:
6274 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6275 branch = "bpos";
6276 else
6277 branch = "bge";
6278 break;
6279 case GT:
6280 branch = "bg";
6281 break;
6282 case LE:
6283 branch = "ble";
6284 break;
6285 case LT:
6286 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6287 branch = "bneg";
6288 else
6289 branch = "bl";
6290 break;
6291 case GEU:
6292 branch = "bgeu";
6293 break;
6294 case GTU:
6295 branch = "bgu";
6296 break;
6297 case LEU:
6298 branch = "bleu";
6299 break;
6300 case LTU:
6301 branch = "blu";
6302 break;
6303
6304 default:
6305 abort ();
6306 }
6307 strcpy (string, branch);
6308 }
6309 spaces -= strlen (branch);
6310 p = strchr (string, '\0');
6311
6312 /* Now add the annulling, the label, and a possible nop.  */
6313 if (annul && ! far)
6314 {
6315 strcpy (p, ",a");
6316 p += 2;
6317 spaces -= 2;
6318 }
6319
6320 if (TARGET_V9)
6321 {
6322 rtx note;
6323 int v8 = 0;
6324
6325 if (! far && insn && INSN_ADDRESSES_SET_P ())
6326 {
6327 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6328 - INSN_ADDRESSES (INSN_UID (insn)));
6329 /* Leave some instructions for "slop". */
6330 if (delta < -260000 || delta >= 260000)
6331 v8 = 1;
6332 }
6333
6334 if (mode == CCFPmode || mode == CCFPEmode)
6335 {
6336 static char v9_fcc_labelno[] = "%%fccX, ";
6337 /* Set the char indicating the number of the fcc reg to use. */
6338 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6339 labelno = v9_fcc_labelno;
6340 if (v8)
6341 {
6342 if (REGNO (cc_reg) == SPARC_FCC_REG)
6343 labelno = "";
6344 else
6345 abort ();
6346 }
6347 }
6348 else if (mode == CCXmode || mode == CCX_NOOVmode)
6349 {
6350 labelno = "%%xcc, ";
6351 if (v8)
6352 abort ();
6353 }
6354 else
6355 {
6356 labelno = "%%icc, ";
6357 if (v8)
6358 labelno = "";
6359 }
6360
6361 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6362 {
6363 strcpy (p,
6364 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6365 ? ",pt" : ",pn");
6366 p += 3;
6367 spaces -= 3;
6368 }
6369 }
6370 else
6371 labelno = "";
6372
6373 if (spaces > 0)
6374 *p++ = '\t';
6375 else
6376 *p++ = ' ';
6377 strcpy (p, labelno);
6378 p = strchr (p, '\0');
6379 if (far)
6380 {
6381 strcpy (p, ".+12\n\t nop\n\tb\t");
6382 /* Skip the next insn if requested or
6383 if we know that it will be a nop. */
6384 if (annul || ! final_sequence)
6385 p[3] = '6';
6386 p += 14;
6387 }
6388 *p++ = '%';
6389 *p++ = 'l';
6390 *p++ = label + '0';
6391 *p++ = '%';
6392 *p++ = '#';
6393 *p = '\0';
6394
6395 return string;
6396 }
6397
6398 /* Emit a library call comparison between floating point X and Y.
6399 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6400 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6401 values as arguments instead of the TFmode registers themselves,
6402 that's why we cannot call emit_float_lib_cmp. */
6403 void
6404 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6405 {
6406 const char *qpfunc;
6407 rtx slot0, slot1, result, tem, tem2;
6408 enum machine_mode mode;
6409
6410 switch (comparison)
6411 {
6412 case EQ:
6413 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6414 break;
6415
6416 case NE:
6417 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6418 break;
6419
6420 case GT:
6421 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6422 break;
6423
6424 case GE:
6425 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6426 break;
6427
6428 case LT:
6429 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6430 break;
6431
6432 case LE:
6433 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6434 break;
6435
6436 case ORDERED:
6437 case UNORDERED:
6438 case UNGT:
6439 case UNLT:
6440 case UNEQ:
6441 case UNGE:
6442 case UNLE:
6443 case LTGT:
6444 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6445 break;
6446
6447 default:
6448 abort ();
6449 break;
6450 }
6451
6452 if (TARGET_ARCH64)
6453 {
6454 if (GET_CODE (x) != MEM)
6455 {
6456 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6457 emit_insn (gen_rtx_SET (VOIDmode, slot0, x));
6458 }
6459 else
6460 slot0 = x;
6461
6462 if (GET_CODE (y) != MEM)
6463 {
6464 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6465 emit_insn (gen_rtx_SET (VOIDmode, slot1, y));
6466 }
6467 else
6468 slot1 = y;
6469
6470 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6471 DImode, 2,
6472 XEXP (slot0, 0), Pmode,
6473 XEXP (slot1, 0), Pmode);
6474
6475 mode = DImode;
6476 }
6477 else
6478 {
6479 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6480 SImode, 2,
6481 x, TFmode, y, TFmode);
6482
6483 mode = SImode;
6484 }
6485
6486
6487 /* Immediately move the result of the libcall into a pseudo
6488 register so reload doesn't clobber the value if it needs
6489 the return register for a spill reg. */
6490 result = gen_reg_rtx (mode);
6491 emit_move_insn (result, hard_libcall_value (mode));
6492
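/* The decoding below relies on the traditional _Q_cmp/_Qp_cmp result
   encoding: 0 = equal, 1 = less, 2 = greater, 3 = unordered.  E.g.
   UNLT tests the low bit (less or unordered) and UNGE tests
   result != 1 (anything but less).  */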
6493 switch (comparison)
6494 {
6495 default:
6496 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6497 break;
6498 case ORDERED:
6499 case UNORDERED:
6500 emit_cmp_insn (result, GEN_INT(3), comparison == UNORDERED ? EQ : NE,
6501 NULL_RTX, mode, 0);
6502 break;
6503 case UNGT:
6504 case UNGE:
6505 emit_cmp_insn (result, const1_rtx,
6506 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6507 break;
6508 case UNLE:
6509 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6510 break;
6511 case UNLT:
6512 tem = gen_reg_rtx (mode);
6513 if (TARGET_ARCH32)
6514 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6515 else
6516 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6517 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6518 break;
6519 case UNEQ:
6520 case LTGT:
6521 tem = gen_reg_rtx (mode);
6522 if (TARGET_ARCH32)
6523 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6524 else
6525 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6526 tem2 = gen_reg_rtx (mode);
6527 if (TARGET_ARCH32)
6528 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6529 else
6530 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6531 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6532 NULL_RTX, mode, 0);
6533 break;
6534 }
6535 }
6536
6537 /* Generate an unsigned DImode to FP conversion. This is the same code
6538 optabs would emit if we didn't have TFmode patterns. */
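/* In C terms the expansion below is roughly the following sketch,
   where the negative path halves the input, keeping the low bit so
   the rounding is preserved, and then doubles the result (the sketch
   uses double to stand in for the target mode MODE):

     double u2d (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) x;
       return 2.0 * (double) ((x >> 1) | (x & 1));
     }  */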
6539
6540 void
6541 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6542 {
6543 rtx neglab, donelab, i0, i1, f0, in, out;
6544
6545 out = operands[0];
6546 in = force_reg (DImode, operands[1]);
6547 neglab = gen_label_rtx ();
6548 donelab = gen_label_rtx ();
6549 i0 = gen_reg_rtx (DImode);
6550 i1 = gen_reg_rtx (DImode);
6551 f0 = gen_reg_rtx (mode);
6552
6553 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6554
6555 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6556 emit_jump_insn (gen_jump (donelab));
6557 emit_barrier ();
6558
6559 emit_label (neglab);
6560
6561 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6562 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6563 emit_insn (gen_iordi3 (i0, i0, i1));
6564 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6565 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6566
6567 emit_label (donelab);
6568 }
6569
6570 /* Generate an FP to unsigned DImode conversion. This is the same code
6571 optabs would emit if we didn't have TFmode patterns. */
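/* In C terms the expansion below is roughly the following sketch,
   with BIAS being 2**63 (9223372036854775808.0): values below BIAS
   convert directly, larger ones are reduced by BIAS first and the
   sign bit of the result is then flipped back on with an xor:

     unsigned long long d2ull (long double x)
     {
       if (x < BIAS)
         return (long long) x;
       return ((unsigned long long) (long long) (x - BIAS))
              ^ (1ULL << 63);
     }  */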
6572
6573 void
6574 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6575 {
6576 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6577
6578 out = operands[0];
6579 in = force_reg (mode, operands[1]);
6580 neglab = gen_label_rtx ();
6581 donelab = gen_label_rtx ();
6582 i0 = gen_reg_rtx (DImode);
6583 i1 = gen_reg_rtx (DImode);
6584 limit = gen_reg_rtx (mode);
6585 f0 = gen_reg_rtx (mode);
6586
6587 emit_move_insn (limit,
6588 CONST_DOUBLE_FROM_REAL_VALUE (
6589 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6590 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6591
6592 emit_insn (gen_rtx_SET (VOIDmode,
6593 out,
6594 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6595 emit_jump_insn (gen_jump (donelab));
6596 emit_barrier ();
6597
6598 emit_label (neglab);
6599
6600 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6601 emit_insn (gen_rtx_SET (VOIDmode,
6602 i0,
6603 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6604 emit_insn (gen_movdi (i1, const1_rtx));
6605 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6606 emit_insn (gen_xordi3 (out, i0, i1));
6607
6608 emit_label (donelab);
6609 }
6610
6611 /* Return the string to output a conditional branch to LABEL, testing
6612 register REG. LABEL is the operand number of the label; REG is the
6613 operand number of the reg. OP is the conditional expression. The mode
6614 of REG says what kind of comparison we made.
6615
6616 DEST is the destination insn (i.e. the label), INSN is the source.
6617
6618 REVERSED is nonzero if we should reverse the sense of the comparison.
6619
6620 ANNUL is nonzero if we should generate an annulling branch. */
6621
6622 const char *
6623 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6624 int annul, rtx insn)
6625 {
6626 static char string[64];
6627 enum rtx_code code = GET_CODE (op);
6628 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6629 rtx note;
6630 int far;
6631 char *p;
6632
6633 /* Branches on registers are limited to +-128KB.  If one is too far away,
6634 change
6635
6636 brnz,pt %g1, .LC30
6637
6638 to
6639
6640 brz,pn %g1, .+12
6641 nop
6642 ba,pt %xcc, .LC30
6643
6644 and
6645
6646 brgez,a,pn %o1, .LC29
6647
6648 to
6649
6650 brlz,pt %o1, .+16
6651 nop
6652 ba,pt %xcc, .LC29 */
6653
6654 far = get_attr_length (insn) >= 3;
6655
6656 /* These are integer register branches, so we can just reverse the code.  */
6657 if (reversed ^ far)
6658 code = reverse_condition (code);
6659
6660 /* Only 64-bit versions of these instructions exist.  */
6661 if (mode != DImode)
6662 abort ();
6663
6664 /* Start by writing the branch condition. */
6665
6666 switch (code)
6667 {
6668 case NE:
6669 strcpy (string, "brnz");
6670 break;
6671
6672 case EQ:
6673 strcpy (string, "brz");
6674 break;
6675
6676 case GE:
6677 strcpy (string, "brgez");
6678 break;
6679
6680 case LT:
6681 strcpy (string, "brlz");
6682 break;
6683
6684 case LE:
6685 strcpy (string, "brlez");
6686 break;
6687
6688 case GT:
6689 strcpy (string, "brgz");
6690 break;
6691
6692 default:
6693 abort ();
6694 }
6695
6696 p = strchr (string, '\0');
6697
6698 /* Now add the annulling, reg, label, and nop. */
6699 if (annul && ! far)
6700 {
6701 strcpy (p, ",a");
6702 p += 2;
6703 }
6704
6705 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6706 {
6707 strcpy (p,
6708 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6709 ? ",pt" : ",pn");
6710 p += 3;
6711 }
6712
6713 *p = p < string + 8 ? '\t' : ' ';
6714 p++;
6715 *p++ = '%';
6716 *p++ = '0' + reg;
6717 *p++ = ',';
6718 *p++ = ' ';
6719 if (far)
6720 {
6721 int veryfar = 1, delta;
6722
6723 if (INSN_ADDRESSES_SET_P ())
6724 {
6725 delta = (INSN_ADDRESSES (INSN_UID (dest))
6726 - INSN_ADDRESSES (INSN_UID (insn)));
6727 /* Leave some instructions for "slop". */
6728 if (delta >= -260000 && delta < 260000)
6729 veryfar = 0;
6730 }
6731
6732 strcpy (p, ".+12\n\t nop\n\t");
6733 /* Skip the next insn if requested or
6734 if we know that it will be a nop. */
6735 if (annul || ! final_sequence)
6736 p[3] = '6';
6737 p += 12;
6738 if (veryfar)
6739 {
6740 strcpy (p, "b\t");
6741 p += 2;
6742 }
6743 else
6744 {
6745 strcpy (p, "ba,pt\t%%xcc, ");
6746 p += 13;
6747 }
6748 }
6749 *p++ = '%';
6750 *p++ = 'l';
6751 *p++ = '0' + label;
6752 *p++ = '%';
6753 *p++ = '#';
6754 *p = '\0';
6755
6756 return string;
6757 }
6758
6759 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6760 Such instructions cannot be used in the delay slot of the return insn on v9.
6761 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
6762 counterparts.  */
6763
6764 static int
6765 epilogue_renumber (register rtx *where, int test)
6766 {
6767 register const char *fmt;
6768 register int i;
6769 register enum rtx_code code;
6770
6771 if (*where == 0)
6772 return 0;
6773
6774 code = GET_CODE (*where);
6775
6776 switch (code)
6777 {
6778 case REG:
6779 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6780 return 1;
6781 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6782 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
6783 case SCRATCH:
6784 case CC0:
6785 case PC:
6786 case CONST_INT:
6787 case CONST_DOUBLE:
6788 return 0;
6789
6790 /* Do not replace the frame pointer with the stack pointer because
6791 it can cause the delayed instruction to load below the stack.
6792 This occurs when instructions like:
6793
6794 (set (reg/i:SI 24 %i0)
6795 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6796 (const_int -20 [0xffffffec])) 0))
6797
6798 are in the return delay slot.  */
6799 case PLUS:
6800 if (GET_CODE (XEXP (*where, 0)) == REG
6801 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6802 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6803 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6804 return 1;
6805 break;
6806
6807 case MEM:
6808 if (SPARC_STACK_BIAS
6809 && GET_CODE (XEXP (*where, 0)) == REG
6810 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6811 return 1;
6812 break;
6813
6814 default:
6815 break;
6816 }
6817
6818 fmt = GET_RTX_FORMAT (code);
6819
6820 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6821 {
6822 if (fmt[i] == 'E')
6823 {
6824 register int j;
6825 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6826 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6827 return 1;
6828 }
6829 else if (fmt[i] == 'e'
6830 && epilogue_renumber (&(XEXP (*where, i)), test))
6831 return 1;
6832 }
6833 return 0;
6834 }
6835 \f
6836 /* Leaf functions and non-leaf functions have different needs. */
6837
6838 static const int
6839 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6840
6841 static const int
6842 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6843
6844 static const int *const reg_alloc_orders[] = {
6845 reg_leaf_alloc_order,
6846 reg_nonleaf_alloc_order};
6847
6848 void
6849 order_regs_for_local_alloc (void)
6850 {
6851 static int last_order_nonleaf = 1;
6852
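/* Hard register 15 is %o7, which call instructions clobber, so its
   regs_ever_live entry effectively records whether the current
   function makes calls, i.e. whether it is a non-leaf function.  */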
6853 if (regs_ever_live[15] != last_order_nonleaf)
6854 {
6855 last_order_nonleaf = !last_order_nonleaf;
6856 memcpy ((char *) reg_alloc_order,
6857 (const char *) reg_alloc_orders[last_order_nonleaf],
6858 FIRST_PSEUDO_REGISTER * sizeof (int));
6859 }
6860 }
6861 \f
6862 /* Return 1 if REG and MEM are legitimate enough to allow the various
6863 mem<-->reg splits to be run. */
6864
6865 int
6866 sparc_splitdi_legitimate (rtx reg, rtx mem)
6867 {
6868 /* Punt if we are here by mistake. */
6869 if (! reload_completed)
6870 abort ();
6871
6872 /* We must have an offsettable memory reference. */
6873 if (! offsettable_memref_p (mem))
6874 return 0;
6875
6876 /* If we have legitimate args for ldd/std, we do not want
6877 the split to happen. */
6878 if ((REGNO (reg) % 2) == 0
6879 && mem_min_alignment (mem, 8))
6880 return 0;
6881
6882 /* Success. */
6883 return 1;
6884 }
6885
6886 /* Return 1 if x and y are some kind of REG and they refer to
6887 different hard registers. This test is guaranteed to be
6888 run after reload. */
6889
6890 int
6891 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6892 {
6893 if (GET_CODE (x) != REG)
6894 return 0;
6895 if (GET_CODE (y) != REG)
6896 return 0;
6897 if (REGNO (x) == REGNO (y))
6898 return 0;
6899 return 1;
6900 }
6901
6902 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6903 This makes them candidates for using ldd and std insns.
6904
6905 Note reg1 and reg2 *must* be hard registers. */
6906
6907 int
6908 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6909 {
6910 /* We might have been passed a SUBREG. */
6911 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6912 return 0;
6913
6914 if (REGNO (reg1) % 2 != 0)
6915 return 0;
6916
6917 /* Integer ldd is deprecated in SPARC V9.  */
6918 if (TARGET_V9 && REGNO (reg1) < 32)
6919 return 0;
6920
6921 return (REGNO (reg1) == REGNO (reg2) - 1);
6922 }
6923
6924 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6925 an ldd or std insn.
6926
6927 This can only happen when addr1 and addr2, the addresses in mem1
6928 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6929 addr1 must also be aligned on a 64-bit boundary.
6930
6931 Also, if dependent_reg_rtx is not null, it should not be used to
6932 compute the address for mem1, i.e. we cannot optimize a sequence
6933 like:
6934 ld [%o0], %o0
6935 ld [%o0 + 4], %o1
6936 to
6937 ldd [%o0], %o0
6938 nor:
6939 ld [%g3 + 4], %g3
6940 ld [%g3], %g2
6941 to
6942 ldd [%g3], %g2
6943
6944 But, note that the transformation from:
6945 ld [%g2 + 4], %g3
6946 ld [%g2], %g2
6947 to
6948 ldd [%g2], %g2
6949 is perfectly fine. Thus, the peephole2 patterns always pass us
6950 the destination register of the first load, never the second one.
6951
6952 For stores we don't have a similar problem, so dependent_reg_rtx is
6953 NULL_RTX. */
6954
6955 int
6956 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6957 {
6958 rtx addr1, addr2;
6959 unsigned int reg1;
6960 HOST_WIDE_INT offset1;
6961
6962 /* The mems cannot be volatile. */
6963 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6964 return 0;
6965
6966 /* MEM1 should be aligned on a 64-bit boundary. */
6967 if (MEM_ALIGN (mem1) < 64)
6968 return 0;
6969
6970 addr1 = XEXP (mem1, 0);
6971 addr2 = XEXP (mem2, 0);
6972
6973 /* Extract a register number and offset (if used) from the first addr. */
6974 if (GET_CODE (addr1) == PLUS)
6975 {
6976 /* If not a REG, return zero. */
6977 if (GET_CODE (XEXP (addr1, 0)) != REG)
6978 return 0;
6979 else
6980 {
6981 reg1 = REGNO (XEXP (addr1, 0));
6982 /* The offset must be constant! */
6983 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6984 return 0;
6985 offset1 = INTVAL (XEXP (addr1, 1));
6986 }
6987 }
6988 else if (GET_CODE (addr1) != REG)
6989 return 0;
6990 else
6991 {
6992 reg1 = REGNO (addr1);
6993 /* This was a simple (mem (reg)) expression. Offset is 0. */
6994 offset1 = 0;
6995 }
6996
6997 /* Make sure the second address is of the form (plus (reg) (const_int)).  */
6998 if (GET_CODE (addr2) != PLUS)
6999 return 0;
7000
7001 if (GET_CODE (XEXP (addr2, 0)) != REG
7002 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7003 return 0;
7004
7005 if (reg1 != REGNO (XEXP (addr2, 0)))
7006 return 0;
7007
7008 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7009 return 0;
7010
7011 /* The first offset must be evenly divisible by 8 to ensure the
7012 address is 64-bit aligned.  */
7013 if (offset1 % 8 != 0)
7014 return 0;
7015
7016 /* The offset for the second addr must be 4 more than the first addr. */
7017 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7018 return 0;
7019
7020 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7021 instructions. */
7022 return 1;
7023 }
7024
7025 /* Return 1 if reg is a pseudo, or is the first register in
7026 a hard register pair. This makes it a candidate for use in
7027 ldd and std insns. */
7028
7029 int
7030 register_ok_for_ldd (rtx reg)
7031 {
7032 /* We might have been passed a SUBREG. */
7033 if (GET_CODE (reg) != REG)
7034 return 0;
7035
7036 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7037 return (REGNO (reg) % 2 == 0);
7038 else
7039 return 1;
7040 }
7041 \f
7042 /* Print operand X (an rtx) in assembler syntax to file FILE.
7043 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7044 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7045
7046 void
7047 print_operand (FILE *file, rtx x, int code)
7048 {
7049 switch (code)
7050 {
7051 case '#':
7052 /* Output an insn in a delay slot. */
7053 if (final_sequence)
7054 sparc_indent_opcode = 1;
7055 else
7056 fputs ("\n\t nop", file);
7057 return;
7058 case '*':
7059 /* Output an annul flag if there's nothing for the delay slot and we
7060 are optimizing. This is always used with '(' below.
7061 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7062 this is a dbx bug. So, we only do this when optimizing.
7063 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7064 Always emit a nop in case the next instruction is a branch. */
7065 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7066 fputs (",a", file);
7067 return;
7068 case '(':
7069 /* Output a 'nop' if there's nothing for the delay slot and we are
7070 not optimizing. This is always used with '*' above. */
7071 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7072 fputs ("\n\t nop", file);
7073 else if (final_sequence)
7074 sparc_indent_opcode = 1;
7075 return;
7076 case ')':
7077 /* Output the right displacement from the saved PC on function return.
7078 The caller may have placed an "unimp" insn immediately after the call
7079 so we have to account for it. This insn is used in the 32-bit ABI
7080 when calling a function that returns a non zero-sized structure. The
7081 64-bit ABI doesn't have it. Be careful to have this test be the same
7082 as that used on the call. */
7083 if (! TARGET_ARCH64
7084 && current_function_returns_struct
7085 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7086 == INTEGER_CST)
7087 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7088 fputs ("12", file);
7089 else
7090 fputc ('8', file);
7091 return;
7092 case '_':
7093 /* Output the Embedded Medium/Anywhere code model base register. */
7094 fputs (EMBMEDANY_BASE_REG, file);
7095 return;
7096 case '&':
7097 /* Print some local dynamic TLS name. */
7098 assemble_name (file, get_some_local_dynamic_name ());
7099 return;
7100
7101 case 'Y':
7102 /* Adjust the operand to take into account a RESTORE operation. */
7103 if (GET_CODE (x) == CONST_INT)
7104 break;
7105 else if (GET_CODE (x) != REG)
7106 output_operand_lossage ("invalid %%Y operand");
7107 else if (REGNO (x) < 8)
7108 fputs (reg_names[REGNO (x)], file);
7109 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7110 fputs (reg_names[REGNO (x)-16], file);
7111 else
7112 output_operand_lossage ("invalid %%Y operand");
7113 return;
7114 case 'L':
7115 /* Print out the low order register name of a register pair. */
7116 if (WORDS_BIG_ENDIAN)
7117 fputs (reg_names[REGNO (x)+1], file);
7118 else
7119 fputs (reg_names[REGNO (x)], file);
7120 return;
7121 case 'H':
7122 /* Print out the high order register name of a register pair. */
7123 if (WORDS_BIG_ENDIAN)
7124 fputs (reg_names[REGNO (x)], file);
7125 else
7126 fputs (reg_names[REGNO (x)+1], file);
7127 return;
7128 case 'R':
7129 /* Print out the second register name of a register pair or quad.
7130 I.e., R (%o0) => %o1. */
7131 fputs (reg_names[REGNO (x)+1], file);
7132 return;
7133 case 'S':
7134 /* Print out the third register name of a register quad.
7135 I.e., S (%o0) => %o2. */
7136 fputs (reg_names[REGNO (x)+2], file);
7137 return;
7138 case 'T':
7139 /* Print out the fourth register name of a register quad.
7140 I.e., T (%o0) => %o3. */
7141 fputs (reg_names[REGNO (x)+3], file);
7142 return;
7143 case 'x':
7144 /* Print a condition code register. */
7145 if (REGNO (x) == SPARC_ICC_REG)
7146 {
7147 /* We don't handle CC[X]_NOOVmode because they're not supposed
7148 to occur here. */
7149 if (GET_MODE (x) == CCmode)
7150 fputs ("%icc", file);
7151 else if (GET_MODE (x) == CCXmode)
7152 fputs ("%xcc", file);
7153 else
7154 abort ();
7155 }
7156 else
7157 /* %fccN register */
7158 fputs (reg_names[REGNO (x)], file);
7159 return;
7160 case 'm':
7161 /* Print the operand's address only. */
7162 output_address (XEXP (x, 0));
7163 return;
7164 case 'r':
7165 /* In this case we need a register. Use %g0 if the
7166 operand is const0_rtx. */
7167 if (x == const0_rtx
7168 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7169 {
7170 fputs ("%g0", file);
7171 return;
7172 }
7173 else
7174 break;
7175
7176 case 'A':
7177 switch (GET_CODE (x))
7178 {
7179 case IOR: fputs ("or", file); break;
7180 case AND: fputs ("and", file); break;
7181 case XOR: fputs ("xor", file); break;
7182 default: output_operand_lossage ("invalid %%A operand");
7183 }
7184 return;
7185
7186 case 'B':
7187 switch (GET_CODE (x))
7188 {
7189 case IOR: fputs ("orn", file); break;
7190 case AND: fputs ("andn", file); break;
7191 case XOR: fputs ("xnor", file); break;
7192 default: output_operand_lossage ("invalid %%B operand");
7193 }
7194 return;
7195
7196 /* These are used by the conditional move instructions. */
7197 case 'c' :
7198 case 'C':
7199 {
7200 enum rtx_code rc = GET_CODE (x);
7201
7202 if (code == 'c')
7203 {
7204 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7205 if (mode == CCFPmode || mode == CCFPEmode)
7206 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7207 else
7208 rc = reverse_condition (GET_CODE (x));
7209 }
7210 switch (rc)
7211 {
7212 case NE: fputs ("ne", file); break;
7213 case EQ: fputs ("e", file); break;
7214 case GE: fputs ("ge", file); break;
7215 case GT: fputs ("g", file); break;
7216 case LE: fputs ("le", file); break;
7217 case LT: fputs ("l", file); break;
7218 case GEU: fputs ("geu", file); break;
7219 case GTU: fputs ("gu", file); break;
7220 case LEU: fputs ("leu", file); break;
7221 case LTU: fputs ("lu", file); break;
7222 case LTGT: fputs ("lg", file); break;
7223 case UNORDERED: fputs ("u", file); break;
7224 case ORDERED: fputs ("o", file); break;
7225 case UNLT: fputs ("ul", file); break;
7226 case UNLE: fputs ("ule", file); break;
7227 case UNGT: fputs ("ug", file); break;
7228 case UNGE: fputs ("uge", file); break;
7229 case UNEQ: fputs ("ue", file); break;
7230 default: output_operand_lossage (code == 'c'
7231 ? "invalid %%c operand"
7232 : "invalid %%C operand");
7233 }
7234 return;
7235 }
7236
7237 /* These are used by the movr instruction pattern. */
7238 case 'd':
7239 case 'D':
7240 {
7241 enum rtx_code rc = (code == 'd'
7242 ? reverse_condition (GET_CODE (x))
7243 : GET_CODE (x));
7244 switch (rc)
7245 {
7246 case NE: fputs ("ne", file); break;
7247 case EQ: fputs ("e", file); break;
7248 case GE: fputs ("gez", file); break;
7249 case LT: fputs ("lz", file); break;
7250 case LE: fputs ("lez", file); break;
7251 case GT: fputs ("gz", file); break;
7252 default: output_operand_lossage (code == 'd'
7253 ? "invalid %%d operand"
7254 : "invalid %%D operand");
7255 }
7256 return;
7257 }
7258
7259 case 'b':
7260 {
7261 /* Print a sign-extended character. */
7262 int i = trunc_int_for_mode (INTVAL (x), QImode);
7263 fprintf (file, "%d", i);
7264 return;
7265 }
7266
7267 case 'f':
7268 /* Operand must be a MEM; write its address. */
7269 if (GET_CODE (x) != MEM)
7270 output_operand_lossage ("invalid %%f operand");
7271 output_address (XEXP (x, 0));
7272 return;
7273
7274 case 's':
7275 {
7276 /* Print a sign-extended 32-bit value. */
7277 HOST_WIDE_INT i;
7278 if (GET_CODE (x) == CONST_INT)
7279 i = INTVAL (x);
7280 else if (GET_CODE (x) == CONST_DOUBLE)
7281 i = CONST_DOUBLE_LOW (x);
7282 else
7283 {
7284 output_operand_lossage ("invalid %%s operand");
7285 return;
7286 }
7287 i = trunc_int_for_mode (i, SImode);
7288 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7289 return;
7290 }
7291
7292 case 0:
7293 /* Do nothing special. */
7294 break;
7295
7296 default:
7297 /* Undocumented flag. */
7298 output_operand_lossage ("invalid operand output code");
7299 }
7300
7301 if (GET_CODE (x) == REG)
7302 fputs (reg_names[REGNO (x)], file);
7303 else if (GET_CODE (x) == MEM)
7304 {
7305 fputc ('[', file);
7306 /* Poor Sun assembler doesn't understand absolute addressing. */
7307 if (CONSTANT_P (XEXP (x, 0)))
7308 fputs ("%g0+", file);
7309 output_address (XEXP (x, 0));
7310 fputc (']', file);
7311 }
7312 else if (GET_CODE (x) == HIGH)
7313 {
7314 fputs ("%hi(", file);
7315 output_addr_const (file, XEXP (x, 0));
7316 fputc (')', file);
7317 }
7318 else if (GET_CODE (x) == LO_SUM)
7319 {
7320 print_operand (file, XEXP (x, 0), 0);
7321 if (TARGET_CM_MEDMID)
7322 fputs ("+%l44(", file);
7323 else
7324 fputs ("+%lo(", file);
7325 output_addr_const (file, XEXP (x, 1));
7326 fputc (')', file);
7327 }
7328 else if (GET_CODE (x) == CONST_DOUBLE
7329 && (GET_MODE (x) == VOIDmode
7330 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7331 {
7332 if (CONST_DOUBLE_HIGH (x) == 0)
7333 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7334 else if (CONST_DOUBLE_HIGH (x) == -1
7335 && CONST_DOUBLE_LOW (x) < 0)
7336 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7337 else
7338 output_operand_lossage ("long long constant not a valid immediate operand");
7339 }
7340 else if (GET_CODE (x) == CONST_DOUBLE)
7341 output_operand_lossage ("floating point constant not a valid immediate operand");
7342 else { output_addr_const (file, x); }
7343 }
7344 \f
7345 /* Target hook for assembling integer objects. The sparc version has
7346 special handling for aligned DI-mode objects. */
7347
7348 static bool
7349 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7350 {
7351 /* ??? We only output .xword's for symbols and only then in environments
7352 where the assembler can handle them. */
7353 if (aligned_p && size == 8
7354 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7355 {
7356 if (TARGET_V9)
7357 {
7358 assemble_integer_with_op ("\t.xword\t", x);
7359 return true;
7360 }
7361 else
7362 {
7363 assemble_aligned_integer (4, const0_rtx);
7364 assemble_aligned_integer (4, x);
7365 return true;
7366 }
7367 }
7368 return default_assemble_integer (x, size, aligned_p);
7369 }
7370 \f
7371 /* Return the value of a code used in the .proc pseudo-op that says
7372 what kind of result this function returns. For non-C types, we pick
7373 the closest C type. */
7374
7375 #ifndef SHORT_TYPE_SIZE
7376 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7377 #endif
7378
7379 #ifndef INT_TYPE_SIZE
7380 #define INT_TYPE_SIZE BITS_PER_WORD
7381 #endif
7382
7383 #ifndef LONG_TYPE_SIZE
7384 #define LONG_TYPE_SIZE BITS_PER_WORD
7385 #endif
7386
7387 #ifndef LONG_LONG_TYPE_SIZE
7388 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7389 #endif
7390
7391 #ifndef FLOAT_TYPE_SIZE
7392 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7393 #endif
7394
7395 #ifndef DOUBLE_TYPE_SIZE
7396 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7397 #endif
7398
7399 #ifndef LONG_DOUBLE_TYPE_SIZE
7400 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7401 #endif
7402
7403 unsigned long
7404 sparc_type_code (register tree type)
7405 {
7406 register unsigned long qualifiers = 0;
7407 register unsigned shift;
7408
7409 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7410 setting more, since some assemblers will give an error for this. Also,
7411 we must be careful to avoid shifts of 32 bits or more to avoid getting
7412 unpredictable results. */
7413
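/* A worked example (illustrative): for "unsigned short *", the first
   iteration sees the POINTER_TYPE and stores code 1 in the two-bit
   field at shift 6; the second iteration reaches the INTEGER_TYPE and
   returns qualifiers | 13, i.e. 0x4d.  */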
7414 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7415 {
7416 switch (TREE_CODE (type))
7417 {
7418 case ERROR_MARK:
7419 return qualifiers;
7420
7421 case ARRAY_TYPE:
7422 qualifiers |= (3 << shift);
7423 break;
7424
7425 case FUNCTION_TYPE:
7426 case METHOD_TYPE:
7427 qualifiers |= (2 << shift);
7428 break;
7429
7430 case POINTER_TYPE:
7431 case REFERENCE_TYPE:
7432 case OFFSET_TYPE:
7433 qualifiers |= (1 << shift);
7434 break;
7435
7436 case RECORD_TYPE:
7437 return (qualifiers | 8);
7438
7439 case UNION_TYPE:
7440 case QUAL_UNION_TYPE:
7441 return (qualifiers | 9);
7442
7443 case ENUMERAL_TYPE:
7444 return (qualifiers | 10);
7445
7446 case VOID_TYPE:
7447 return (qualifiers | 16);
7448
7449 case INTEGER_TYPE:
7450 /* If this is a range type, consider it to be the underlying
7451 type. */
7452 if (TREE_TYPE (type) != 0)
7453 break;
7454
7455 /* Carefully distinguish all the standard types of C,
7456 without messing up if the language is not C. We do this by
7457 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7458 look at both the names and the above fields, but that's redundant.
7459 Any type whose size is between two C types will be considered
7460 to be the wider of the two types. Also, we do not have a
7461 special code to use for "long long", so anything wider than
7462 long is treated the same. Note that we can't distinguish
7463 between "int" and "long" in this code if they are the same
7464 size, but that's fine, since neither can the assembler. */
7465
7466 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7467 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7468
7469 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7470 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7471
7472 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7473 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7474
7475 else
7476 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7477
7478 case REAL_TYPE:
7479 /* If this is a range type, consider it to be the underlying
7480 type. */
7481 if (TREE_TYPE (type) != 0)
7482 break;
7483
7484 /* Carefully distinguish all the standard types of C,
7485 without messing up if the language is not C. */
7486
7487 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7488 return (qualifiers | 6);
7489
7490 else
7491 return (qualifiers | 7);
7492
7493 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7494 /* ??? We need to distinguish between double and float complex types,
7495 but I don't know how yet because I can't reach this code from
7496 existing front-ends. */
7497 return (qualifiers | 7); /* Who knows? */
7498
7499 case VECTOR_TYPE:
7500 case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
7501 case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
7502 case FILE_TYPE: /* GNU Pascal FILE type. */
7503 case SET_TYPE: /* GNU Pascal SET type. */
7504 case LANG_TYPE: /* ? */
7505 return qualifiers;
7506
7507 default:
7508 abort (); /* Not a type! */
7509 }
7510 }
7511
7512 return qualifiers;
7513 }
7514 \f
7515 /* Nested function support. */
7516
7517 /* Emit RTL insns to initialize the variable parts of a trampoline.
7518 FNADDR is an RTX for the address of the function's pure code.
7519 CXT is an RTX for the static chain value for the function.
7520
7521 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7522 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7523 (to store insns). This is a bit excessive. Perhaps a different
7524 mechanism would be better here.
7525
7526 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7527
7528 void
7529 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7530 {
7531 /* SPARC 32-bit trampoline:
7532
7533 sethi %hi(fn), %g1
7534 sethi %hi(static), %g2
7535 jmp %g1+%lo(fn)
7536 or %g2, %lo(static), %g2
7537
7538 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7539 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7540 */
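/* The constants OR'ed in below should be the fixed opcode bits of the
   four instructions above: 0x03000000 is "sethi 0, %g1", 0x05000000
   is "sethi 0, %g2", 0x81c06000 is "jmp %g1+0" and 0x8410a000 is
   "or %g2, 0, %g2".  */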
7541
7542 emit_move_insn
7543 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7544 expand_binop (SImode, ior_optab,
7545 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7546 size_int (10), 0, 1),
7547 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7548 NULL_RTX, 1, OPTAB_DIRECT));
7549
7550 emit_move_insn
7551 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7552 expand_binop (SImode, ior_optab,
7553 expand_shift (RSHIFT_EXPR, SImode, cxt,
7554 size_int (10), 0, 1),
7555 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7556 NULL_RTX, 1, OPTAB_DIRECT));
7557
7558 emit_move_insn
7559 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7560 expand_binop (SImode, ior_optab,
7561 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7562 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7563 NULL_RTX, 1, OPTAB_DIRECT));
7564
7565 emit_move_insn
7566 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7567 expand_binop (SImode, ior_optab,
7568 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7569 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7570 NULL_RTX, 1, OPTAB_DIRECT));
7571
7572 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7573 aligned on a 16 byte boundary so one flush clears it all. */
7574 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7575 if (sparc_cpu != PROCESSOR_ULTRASPARC
7576 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7577 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7578 plus_constant (tramp, 8)))));
7579
7580 /* Call __enable_execute_stack after writing onto the stack to make sure
7581 the stack address is accessible. */
7582 #ifdef ENABLE_EXECUTE_STACK
7583 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7584 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7585 #endif
7586
7587 }
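/* An illustrative standalone sketch (not part of the compiler proper)
   of how the first two trampoline words above are composed at run time,
   assuming a 32-bit target address FNADDR.  The templates 0x03000000
   (SETHI i,%g1) and 0x81c06000 (JMPL %g1+i,%g0) are the same constants
   used by sparc_initialize_trampoline.  */

static unsigned int
example_sethi_g1_word (unsigned int fnaddr)
{
  /* %hi(fnaddr) is the upper 22 bits, i.e. fnaddr >> 10.  */
  return 0x03000000u | (fnaddr >> 10);
}

static unsigned int
example_jmpl_g1_word (unsigned int fnaddr)
{
  /* %lo(fnaddr) is the low 10 bits, or'd into the immediate field.  */
  return 0x81c06000u | (fnaddr & 0x3ffu);
}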
7588
7589 /* The 64-bit version is simpler because it makes more sense to load the
7590 values as "immediate" data out of the trampoline. It's also easier since
7591 we can read the PC without clobbering a register. */
7592
7593 void
7594 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7595 {
7596 /* SPARC 64-bit trampoline:
7597
7598 rd %pc, %g1
7599 ldx [%g1+24], %g5
7600 jmp %g5
7601 ldx [%g1+16], %g5
7602 +16 bytes data
7603 */
7604
7605 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7606 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7607 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7608 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7609 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7610 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7611 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7612 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7613 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7614 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7615 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7616
7617 if (sparc_cpu != PROCESSOR_ULTRASPARC
7618 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7619 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7620
7621 /* Call __enable_execute_stack after writing onto the stack to make sure
7622 the stack address is accessible. */
7623 #ifdef ENABLE_EXECUTE_STACK
7624 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7625 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7626 #endif
7627 }
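/* Layout sketch, derived from the stores above: bytes 0-15 hold the
   four instruction words, bytes 16-23 the static chain value and bytes
   24-31 the function address, which is why the ldx offsets in the code
   template are 16 and 24.  */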
7628 \f
7629 /* Adjust the cost of a scheduling dependency. Return the new cost of
7630 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7631
7632 static int
7633 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7634 {
7635 enum attr_type insn_type;
7636
7637 if (! recog_memoized (insn))
7638 return 0;
7639
7640 insn_type = get_attr_type (insn);
7641
7642 if (REG_NOTE_KIND (link) == 0)
7643 {
7644 /* Data dependency; DEP_INSN writes a register that INSN reads some
7645 cycles later. */
7646
7647       /* If a load, then the dependence must be on the memory address;
7648 	 add an extra "cycle".  Note that the cost could be two cycles
7649 	 if the reg was written late in an instruction group; we cannot
7650 	 tell here.  */
7651 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7652 return cost + 3;
7653
7654 /* Get the delay only if the address of the store is the dependence. */
7655 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7656 {
7657 	  rtx pat = PATTERN (insn);
7658 rtx dep_pat = PATTERN (dep_insn);
7659
7660 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7661 return cost; /* This should not happen! */
7662
7663 /* The dependency between the two instructions was on the data that
7664 is being stored. Assume that this implies that the address of the
7665 store is not dependent. */
7666 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7667 return cost;
7668
7669 return cost + 3; /* An approximation. */
7670 }
7671
7672 /* A shift instruction cannot receive its data from an instruction
7673 	 in the same cycle; add a one-cycle penalty.  */
7674 if (insn_type == TYPE_SHIFT)
7675 return cost + 3; /* Split before cascade into shift. */
7676 }
7677 else
7678 {
7679 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7680 INSN writes some cycles later. */
7681
7682       /* These are only significant for the fpu; writing an fp reg before
7683 	 the fpu has finished with it stalls the processor.  */
7684
7685 /* Reusing an integer register causes no problems. */
7686 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7687 return 0;
7688 }
7689
7690 return cost;
7691 }
7692
7693 static int
7694 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7695 {
7696 enum attr_type insn_type, dep_type;
7697   rtx pat = PATTERN (insn);
7698 rtx dep_pat = PATTERN (dep_insn);
7699
7700 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7701 return cost;
7702
7703 insn_type = get_attr_type (insn);
7704 dep_type = get_attr_type (dep_insn);
7705
7706 switch (REG_NOTE_KIND (link))
7707 {
7708 case 0:
7709 /* Data dependency; DEP_INSN writes a register that INSN reads some
7710 cycles later. */
7711
7712 switch (insn_type)
7713 {
7714 case TYPE_STORE:
7715 case TYPE_FPSTORE:
7716 /* Get the delay iff the address of the store is the dependence. */
7717 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7718 return cost;
7719
7720 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7721 return cost;
7722 return cost + 3;
7723
7724 case TYPE_LOAD:
7725 case TYPE_SLOAD:
7726 case TYPE_FPLOAD:
7727 	  /* If a load, then the dependence must be on the memory address.  If
7728 	 the addresses aren't equal, then it might be a false dependency.  */
7729 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7730 {
7731 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7732 || GET_CODE (SET_DEST (dep_pat)) != MEM
7733 || GET_CODE (SET_SRC (pat)) != MEM
7734 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7735 XEXP (SET_SRC (pat), 0)))
7736 return cost + 2;
7737
7738 return cost + 8;
7739 }
7740 break;
7741
7742 case TYPE_BRANCH:
7743 /* Compare to branch latency is 0. There is no benefit from
7744 separating compare and branch. */
7745 if (dep_type == TYPE_COMPARE)
7746 return 0;
7747 /* Floating point compare to branch latency is less than
7748 compare to conditional move. */
7749 if (dep_type == TYPE_FPCMP)
7750 return cost - 1;
7751 break;
7752 default:
7753 break;
7754 }
7755 break;
7756
7757 case REG_DEP_ANTI:
7758 /* Anti-dependencies only penalize the fpu unit. */
7759 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7760 return 0;
7761 break;
7762
7763 default:
7764 break;
7765 }
7766
7767 return cost;
7768 }
7769
7770 static int
7771 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7772 {
7773 switch (sparc_cpu)
7774 {
7775 case PROCESSOR_SUPERSPARC:
7776 cost = supersparc_adjust_cost (insn, link, dep, cost);
7777 break;
7778 case PROCESSOR_HYPERSPARC:
7779 case PROCESSOR_SPARCLITE86X:
7780 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7781 break;
7782 default:
7783 break;
7784 }
7785 return cost;
7786 }
7787
7788 static void
7789 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7790 int sched_verbose ATTRIBUTE_UNUSED,
7791 int max_ready ATTRIBUTE_UNUSED)
7792 {
7793 }
7794
7795 static int
7796 sparc_use_sched_lookahead (void)
7797 {
7798 if (sparc_cpu == PROCESSOR_ULTRASPARC
7799 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7800 return 4;
7801 if ((1 << sparc_cpu) &
7802 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7803 (1 << PROCESSOR_SPARCLITE86X)))
7804 return 3;
7805 return 0;
7806 }
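/* The (1 << sparc_cpu) & mask expression above is a compact
   set-membership test.  A minimal standalone sketch of the idiom
   (illustrative only, not used by the port):  */

static int
example_cpu_in_set (int cpu, unsigned int mask)
{
  /* True iff bit CPU is set in MASK.  */
  return ((1u << cpu) & mask) != 0;
}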
7807
7808 static int
7809 sparc_issue_rate (void)
7810 {
7811 switch (sparc_cpu)
7812 {
7813 default:
7814 return 1;
7815 case PROCESSOR_V9:
7816 /* Assume V9 processors are capable of at least dual-issue. */
7817 return 2;
7818 case PROCESSOR_SUPERSPARC:
7819 return 3;
7820 case PROCESSOR_HYPERSPARC:
7821 case PROCESSOR_SPARCLITE86X:
7822 return 2;
7823 case PROCESSOR_ULTRASPARC:
7824 case PROCESSOR_ULTRASPARC3:
7825 return 4;
7826 }
7827 }
7828
7829 static int
7830 set_extends (rtx insn)
7831 {
7832 register rtx pat = PATTERN (insn);
7833
7834 switch (GET_CODE (SET_SRC (pat)))
7835 {
7836 /* Load and some shift instructions zero extend. */
7837 case MEM:
7838 case ZERO_EXTEND:
7839       /* sethi clears the high bits.  */
7840     case HIGH:
7841       /* LO_SUM is used with sethi; sethi clears the high
7842 	 bits and the values used with lo_sum are positive.  */
7843     case LO_SUM:
7844       /* Store flag stores 0 or 1.  */
7845 case LT: case LTU:
7846 case GT: case GTU:
7847 case LE: case LEU:
7848 case GE: case GEU:
7849 case EQ:
7850 case NE:
7851 return 1;
7852 case AND:
7853 {
7854 rtx op0 = XEXP (SET_SRC (pat), 0);
7855 rtx op1 = XEXP (SET_SRC (pat), 1);
7856 if (GET_CODE (op1) == CONST_INT)
7857 return INTVAL (op1) >= 0;
7858 if (GET_CODE (op0) != REG)
7859 return 0;
7860 if (sparc_check_64 (op0, insn) == 1)
7861 return 1;
7862 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7863 }
7864 case IOR:
7865 case XOR:
7866 {
7867 rtx op0 = XEXP (SET_SRC (pat), 0);
7868 rtx op1 = XEXP (SET_SRC (pat), 1);
7869 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7870 return 0;
7871 if (GET_CODE (op1) == CONST_INT)
7872 return INTVAL (op1) >= 0;
7873 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7874 }
7875 case LSHIFTRT:
7876 return GET_MODE (SET_SRC (pat)) == SImode;
7877 /* Positive integers leave the high bits zero. */
7878 case CONST_DOUBLE:
7879 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7880 case CONST_INT:
7881 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
7882 case ASHIFTRT:
7883 case SIGN_EXTEND:
7884 return - (GET_MODE (SET_SRC (pat)) == SImode);
7885 case REG:
7886 return sparc_check_64 (SET_SRC (pat), insn);
7887 default:
7888 return 0;
7889 }
7890 }
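/* A worked example of the AND case above: and-ing with a CONST_INT
   whose value is non-negative (sign bit clear) forces the high bits of
   the result to zero, so e.g. (and:DI x (const_int 0xff)) is known to
   zero-extend regardless of x.  */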
7891
7892 /* We _ought_ to have only one kind per function, but... */
7893 static GTY(()) rtx sparc_addr_diff_list;
7894 static GTY(()) rtx sparc_addr_list;
7895
7896 void
7897 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7898 {
7899 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7900 if (diff)
7901 sparc_addr_diff_list
7902 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7903 else
7904 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7905 }
7906
7907 static void
7908 sparc_output_addr_vec (rtx vec)
7909 {
7910 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7911 int idx, vlen = XVECLEN (body, 0);
7912
7913 #ifdef ASM_OUTPUT_ADDR_VEC_START
7914 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7915 #endif
7916
7917 #ifdef ASM_OUTPUT_CASE_LABEL
7918 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7919 NEXT_INSN (lab));
7920 #else
7921 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7922 #endif
7923
7924 for (idx = 0; idx < vlen; idx++)
7925 {
7926 ASM_OUTPUT_ADDR_VEC_ELT
7927 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7928 }
7929
7930 #ifdef ASM_OUTPUT_ADDR_VEC_END
7931 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7932 #endif
7933 }
7934
7935 static void
7936 sparc_output_addr_diff_vec (rtx vec)
7937 {
7938 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7939 rtx base = XEXP (XEXP (body, 0), 0);
7940 int idx, vlen = XVECLEN (body, 1);
7941
7942 #ifdef ASM_OUTPUT_ADDR_VEC_START
7943 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7944 #endif
7945
7946 #ifdef ASM_OUTPUT_CASE_LABEL
7947 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7948 NEXT_INSN (lab));
7949 #else
7950 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7951 #endif
7952
7953 for (idx = 0; idx < vlen; idx++)
7954 {
7955 ASM_OUTPUT_ADDR_DIFF_ELT
7956 (asm_out_file,
7957 body,
7958 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7959 CODE_LABEL_NUMBER (base));
7960 }
7961
7962 #ifdef ASM_OUTPUT_ADDR_VEC_END
7963 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7964 #endif
7965 }
7966
7967 static void
7968 sparc_output_deferred_case_vectors (void)
7969 {
7970 rtx t;
7971 int align;
7972
7973 if (sparc_addr_list == NULL_RTX
7974 && sparc_addr_diff_list == NULL_RTX)
7975 return;
7976
7977 /* Align to cache line in the function's code section. */
7978 function_section (current_function_decl);
7979
7980 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7981 if (align > 0)
7982 ASM_OUTPUT_ALIGN (asm_out_file, align);
7983
7984 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7985 sparc_output_addr_vec (XEXP (t, 0));
7986 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7987 sparc_output_addr_diff_vec (XEXP (t, 0));
7988
7989 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7990 }
7991
7992 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7993 unknown.  Return 1 if the high bits are zero, -1 if the register is
7994 sign-extended.  */
7995 int
7996 sparc_check_64 (rtx x, rtx insn)
7997 {
7998 /* If a register is set only once, it is safe to ignore insns that
7999 this code does not know how to handle.  The loop will either recognize
8000 the single set and return the correct value, or fail to recognize
8001 it and return 0.  */
8002 int set_once = 0;
8003 rtx y = x;
8004
8005 if (GET_CODE (x) != REG)
8006 abort ();
8007
8008 if (GET_MODE (x) == DImode)
8009 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8010
8011 if (flag_expensive_optimizations
8012 && REG_N_SETS (REGNO (y)) == 1)
8013 set_once = 1;
8014
8015 if (insn == 0)
8016 {
8017 if (set_once)
8018 insn = get_last_insn_anywhere ();
8019 else
8020 return 0;
8021 }
8022
8023 while ((insn = PREV_INSN (insn)))
8024 {
8025 switch (GET_CODE (insn))
8026 {
8027 case JUMP_INSN:
8028 case NOTE:
8029 break;
8030 case CODE_LABEL:
8031 case CALL_INSN:
8032 default:
8033 if (! set_once)
8034 return 0;
8035 break;
8036 case INSN:
8037 {
8038 rtx pat = PATTERN (insn);
8039 if (GET_CODE (pat) != SET)
8040 return 0;
8041 if (rtx_equal_p (x, SET_DEST (pat)))
8042 return set_extends (insn);
8043 if (y && rtx_equal_p (y, SET_DEST (pat)))
8044 return set_extends (insn);
8045 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8046 return 0;
8047 }
8048 }
8049 }
8050 return 0;
8051 }
8052
8053 /* Returns assembly code to perform a DImode shift using
8054 a 64-bit global or out register on SPARC-V8+. */
8055 const char *
8056 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8057 {
8058 static char asm_code[60];
8059
8060 /* The scratch register is only required when the destination
8061 register is not a 64-bit global or out register. */
8062 if (which_alternative != 2)
8063 operands[3] = operands[0];
8064
8065 /* We can only shift by constants <= 63. */
8066 if (GET_CODE (operands[2]) == CONST_INT)
8067 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8068
8069 if (GET_CODE (operands[1]) == CONST_INT)
8070 {
8071 output_asm_insn ("mov\t%1, %3", operands);
8072 }
8073 else
8074 {
8075 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8076 if (sparc_check_64 (operands[1], insn) <= 0)
8077 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8078 output_asm_insn ("or\t%L1, %3, %3", operands);
8079 }
8080
8081   strcpy (asm_code, opcode);
8082
8083 if (which_alternative != 2)
8084 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8085 else
8086 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8087 }
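/* For instance, with OPCODE "sllx" and alternative 0 or 1 the returned
   template expands to something like the following, assuming operand 0
   is the %o0/%o1 pair (so %H0 is %o0 and %L0 is %o1) and operand 2 is
   in %o2:

	sllx	%o0, %o2, %o1
	srlx	%o1, 32, %o0

   after the insns emitted above have merged both 32-bit halves of
   operand 1 into a single 64-bit register.  */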
8088 \f
8089 /* Output rtl to increment the profiler label LABELNO
8090 for profiling a function entry. */
8091
8092 void
8093 sparc_profile_hook (int labelno)
8094 {
8095 char buf[32];
8096 rtx lab, fun;
8097
8098 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8099 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8100 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8101
8102 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8103 }
8104 \f
8105 #ifdef OBJECT_FORMAT_ELF
8106 static void
8107 sparc_elf_asm_named_section (const char *name, unsigned int flags,
8108 tree decl)
8109 {
8110 if (flags & SECTION_MERGE)
8111 {
8112       /* The entsize cannot be expressed in this section-attribute
8113 	 encoding style.  */
8114 default_elf_asm_named_section (name, flags, decl);
8115 return;
8116 }
8117
8118 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8119
8120 if (!(flags & SECTION_DEBUG))
8121 fputs (",#alloc", asm_out_file);
8122 if (flags & SECTION_WRITE)
8123 fputs (",#write", asm_out_file);
8124 if (flags & SECTION_TLS)
8125 fputs (",#tls", asm_out_file);
8126 if (flags & SECTION_CODE)
8127 fputs (",#execinstr", asm_out_file);
8128
8129 /* ??? Handle SECTION_BSS. */
8130
8131 fputc ('\n', asm_out_file);
8132 }
8133 #endif /* OBJECT_FORMAT_ELF */
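/* For example, a writable TLS data section named ".tdata" comes out of
   sparc_elf_asm_named_section as

	.section	".tdata",#alloc,#write,#tls

   using the Sun-style #flag syntax rather than the usual ELF flag
   string.  */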
8134
8135 /* We do not allow indirect calls to be optimized into sibling calls.
8136
8137 We cannot use sibling calls when delayed branches are disabled
8138 because they will likely require the call delay slot to be filled.
8139
8140 Also, on SPARC 32-bit we cannot emit a sibling call when the
8141 current function returns a structure. This is because the "unimp
8142 after call" convention would cause the callee to return to the
8143 wrong place. The generic code already disallows cases where the
8144 function being called returns a structure.
8145
8146    It may seem strange that this last case could occur.  Usually there
8147    is code after the call which jumps to epilogue code which dumps the
8148    return value into the struct return area.  That ought to invalidate
8149    the sibling call, right?  Well, in the C++ case we can end up passing
8150    the pointer to the struct return area to a constructor (which returns
8151    void) and then nothing else happens.  Such a sibling call would look
8152    valid without the added check here.  */
8153 static bool
8154 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8155 {
8156 return (decl
8157 && flag_delayed_branch
8158 && (TARGET_ARCH64 || ! current_function_returns_struct));
8159 }
8160 \f
8161 /* libfunc renaming. */
8162 #include "config/gofast.h"
8163
8164 static void
8165 sparc_init_libfuncs (void)
8166 {
8167 if (TARGET_ARCH32)
8168 {
8169 /* Use the subroutines that Sun's library provides for integer
8170 multiply and divide. The `*' prevents an underscore from
8171 being prepended by the compiler. .umul is a little faster
8172 than .mul. */
8173 set_optab_libfunc (smul_optab, SImode, "*.umul");
8174 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8175 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8176 set_optab_libfunc (smod_optab, SImode, "*.rem");
8177 set_optab_libfunc (umod_optab, SImode, "*.urem");
8178
8179       /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
8180 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8181 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8182 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8183 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8184 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8185
8186       /* We can define the TFmode sqrt optab only if TARGET_FPU.  With
8187 	 soft-float, the SFmode and DFmode sqrt instructions are absent,
8188 	 so the compiler would otherwise notice and try to use the TFmode
8189 	 sqrt instruction for calls to the built-in function sqrt, which
8190 	 fails.  */
8191 if (TARGET_FPU)
8192 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8193
8194 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8195 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8196 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8197 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8198 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8199 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8200
8201 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8202 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8203 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8204 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8205
8206 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8207 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8208 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8209
8210 if (DITF_CONVERSION_LIBFUNCS)
8211 {
8212 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8213 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8214 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8215 }
8216
8217 if (SUN_CONVERSION_LIBFUNCS)
8218 {
8219 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8220 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8221 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8222 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8223 }
8224 }
8225 if (TARGET_ARCH64)
8226 {
8227       /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8228 do not exist in the library. Make sure the compiler does not
8229 emit calls to them by accident. (It should always use the
8230 hardware instructions.) */
8231 set_optab_libfunc (smul_optab, SImode, 0);
8232 set_optab_libfunc (sdiv_optab, SImode, 0);
8233 set_optab_libfunc (udiv_optab, SImode, 0);
8234 set_optab_libfunc (smod_optab, SImode, 0);
8235 set_optab_libfunc (umod_optab, SImode, 0);
8236
8237 if (SUN_INTEGER_MULTIPLY_64)
8238 {
8239 set_optab_libfunc (smul_optab, DImode, "__mul64");
8240 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8241 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8242 set_optab_libfunc (smod_optab, DImode, "__rem64");
8243 set_optab_libfunc (umod_optab, DImode, "__urem64");
8244 }
8245
8246 if (SUN_CONVERSION_LIBFUNCS)
8247 {
8248 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8249 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8250 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8251 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8252 }
8253 }
8254
8255 gofast_maybe_init_libfuncs ();
8256 }
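/* As a concrete illustration: under TARGET_ARCH32, a "long double"
   multiplication therefore compiles to a call to _Q_mul, and a
   "long double" ordered comparison a < b to a call to _Q_flt, per the
   SPARC 32-bit ABI names registered above.  */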
8257 \f
8258 int
8259 sparc_extra_constraint_check (rtx op, int c, int strict)
8260 {
8261 int reload_ok_mem;
8262
8263 if (TARGET_ARCH64
8264 && (c == 'T' || c == 'U'))
8265 return 0;
8266
8267 switch (c)
8268 {
8269 case 'Q':
8270 return fp_sethi_p (op);
8271
8272 case 'R':
8273 return fp_mov_p (op);
8274
8275 case 'S':
8276 return fp_high_losum_p (op);
8277
8278 case 'U':
8279 if (! strict
8280 || (GET_CODE (op) == REG
8281 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8282 || reg_renumber[REGNO (op)] >= 0)))
8283 return register_ok_for_ldd (op);
8284
8285 return 0;
8286
8287 case 'W':
8288 case 'T':
8289 break;
8290
8291 default:
8292 return 0;
8293 }
8294
8295 /* Our memory extra constraints have to emulate the
8296 behavior of 'm' and 'o' in order for reload to work
8297 correctly. */
8298 if (GET_CODE (op) == MEM)
8299 {
8300 reload_ok_mem = 0;
8301 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8302 && (! strict
8303 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8304 reload_ok_mem = 1;
8305 }
8306 else
8307 {
8308 reload_ok_mem = (reload_in_progress
8309 && GET_CODE (op) == REG
8310 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8311 		       && reg_renumber[REGNO (op)] < 0);
8312 }
8313
8314 return reload_ok_mem;
8315 }
8316
8317 /* ??? This duplicates information provided to the compiler by the
8318 ??? scheduler description. Some day, teach genautomata to output
8319 ??? the latencies and then CSE will just use that. */
8320
8321 static bool
8322 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8323 {
8324 enum machine_mode mode = GET_MODE (x);
8325 bool float_mode_p = FLOAT_MODE_P (mode);
8326
8327 switch (code)
8328 {
8329 case CONST_INT:
8330 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8331 {
8332 *total = 0;
8333 return true;
8334 }
8335 /* FALLTHRU */
8336
8337 case HIGH:
8338 *total = 2;
8339 return true;
8340
8341 case CONST:
8342 case LABEL_REF:
8343 case SYMBOL_REF:
8344 *total = 4;
8345 return true;
8346
8347 case CONST_DOUBLE:
8348 if (GET_MODE (x) == DImode
8349 && ((XINT (x, 3) == 0
8350 && (unsigned HOST_WIDE_INT) XINT (x, 2) < 0x1000)
8351 || (XINT (x, 3) == -1
8352 && XINT (x, 2) < 0
8353 && XINT (x, 2) >= -0x1000)))
8354 *total = 0;
8355 else
8356 *total = 8;
8357 return true;
8358
8359 case MEM:
8360 /* If outer-code was a sign or zero extension, a cost
8361 of COSTS_N_INSNS (1) was already added in. This is
8362 why we are subtracting it back out. */
8363 if (outer_code == ZERO_EXTEND)
8364 {
8365 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8366 }
8367 else if (outer_code == SIGN_EXTEND)
8368 {
8369 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8370 }
8371 else if (float_mode_p)
8372 {
8373 *total = sparc_costs->float_load;
8374 }
8375 else
8376 {
8377 *total = sparc_costs->int_load;
8378 }
8379
8380 return true;
8381
8382 case PLUS:
8383 case MINUS:
8384 if (float_mode_p)
8385 *total = sparc_costs->float_plusminus;
8386 else
8387 *total = COSTS_N_INSNS (1);
8388 return false;
8389
8390 case MULT:
8391 if (float_mode_p)
8392 *total = sparc_costs->float_mul;
8393 else if (! TARGET_HARD_MUL)
8394 *total = COSTS_N_INSNS (25);
8395 else
8396 {
8397 int bit_cost;
8398
8399 bit_cost = 0;
8400 if (sparc_costs->int_mul_bit_factor)
8401 {
8402 int nbits;
8403
8404 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8405 {
8406 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8407 for (nbits = 0; value != 0; value &= value - 1)
8408 nbits++;
8409 }
8410 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8411 && GET_MODE (XEXP (x, 1)) == DImode)
8412 {
8413 rtx x1 = XEXP (x, 1);
8414 unsigned HOST_WIDE_INT value1 = XINT (x1, 2);
8415 unsigned HOST_WIDE_INT value2 = XINT (x1, 3);
8416
8417 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8418 nbits++;
8419 for (; value2 != 0; value2 &= value2 - 1)
8420 nbits++;
8421 }
8422 else
8423 nbits = 7;
8424
8425 if (nbits < 3)
8426 nbits = 3;
8427 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8428 bit_cost = COSTS_N_INSNS (bit_cost);
8429 }
8430
8431 if (mode == DImode)
8432 *total = sparc_costs->int_mulX + bit_cost;
8433 else
8434 *total = sparc_costs->int_mul + bit_cost;
8435 }
8436 return false;
8437
8438 case ASHIFT:
8439 case ASHIFTRT:
8440 case LSHIFTRT:
8441 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8442 return false;
8443
8444 case DIV:
8445 case UDIV:
8446 case MOD:
8447 case UMOD:
8448 if (float_mode_p)
8449 {
8450 if (mode == DFmode)
8451 *total = sparc_costs->float_div_df;
8452 else
8453 *total = sparc_costs->float_div_sf;
8454 }
8455 else
8456 {
8457 if (mode == DImode)
8458 *total = sparc_costs->int_divX;
8459 else
8460 *total = sparc_costs->int_div;
8461 }
8462 return false;
8463
8464 case NEG:
8465 if (! float_mode_p)
8466 {
8467 *total = COSTS_N_INSNS (1);
8468 return false;
8469 }
8470 /* FALLTHRU */
8471
8472 case ABS:
8473 case FLOAT:
8474 case UNSIGNED_FLOAT:
8475 case FIX:
8476 case UNSIGNED_FIX:
8477 case FLOAT_EXTEND:
8478 case FLOAT_TRUNCATE:
8479 *total = sparc_costs->float_move;
8480 return false;
8481
8482 case SQRT:
8483 if (mode == DFmode)
8484 *total = sparc_costs->float_sqrt_df;
8485 else
8486 *total = sparc_costs->float_sqrt_sf;
8487 return false;
8488
8489 case COMPARE:
8490 if (float_mode_p)
8491 *total = sparc_costs->float_cmp;
8492 else
8493 *total = COSTS_N_INSNS (1);
8494 return false;
8495
8496 case IF_THEN_ELSE:
8497 if (float_mode_p)
8498 *total = sparc_costs->float_cmove;
8499 else
8500 *total = sparc_costs->int_cmove;
8501 return false;
8502
8503 default:
8504 return false;
8505 }
8506 }
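/* The nbits loops in the MULT case above rely on the classic
   "value &= value - 1" trick, which clears the lowest set bit on each
   iteration.  A standalone popcount sketch of the same idiom:  */

static int
example_popcount (unsigned HOST_WIDE_INT value)
{
  int nbits = 0;

  while (value != 0)
    {
      value &= value - 1;	/* clear the lowest set bit */
      nbits++;
    }
  return nbits;
}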
8507
8508 /* Emit the sequence of insns SEQ while preserving the register REG. */
8509
8510 static void
8511 emit_and_preserve (rtx seq, rtx reg)
8512 {
8513 rtx slot = gen_rtx_MEM (word_mode,
8514 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8515
8516 emit_stack_pointer_decrement (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT));
8517 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8518 emit_insn (seq);
8519 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8520 emit_stack_pointer_increment (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT));
8521 }
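/* The effect is roughly the following sequence around SEQ (a sketch
   only; the exact insns depend on emit_stack_pointer_decrement and
   friends, and would be stx/ldx under ARCH64):

	sub	%sp, STACK_BOUNDARY/8, %sp
	st	reg, [%sp + SPARC_STACK_BIAS]
	... SEQ ...
	ld	[%sp + SPARC_STACK_BIAS], reg
	add	%sp, STACK_BOUNDARY/8, %sp  */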
8522
8523 /* Output the assembler code for a thunk function. THUNK_DECL is the
8524 declaration for the thunk function itself, FUNCTION is the decl for
8525 the target function. DELTA is an immediate constant offset to be
8526 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8527 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8528
8529 static void
8530 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8531 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8532 tree function)
8533 {
8534 rtx this, insn, funexp;
8535 unsigned int int_arg_first;
8536
8537 reload_completed = 1;
8538 epilogue_completed = 1;
8539 no_new_pseudos = 1;
8540 reset_block_changes ();
8541
8542 emit_note (NOTE_INSN_PROLOGUE_END);
8543
8544 if (flag_delayed_branch)
8545 {
8546 /* We will emit a regular sibcall below, so we need to instruct
8547 output_sibcall that we are in a leaf function. */
8548 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8549
8550       /* This will cause final.c to invoke leaf_renumber_regs, so we
8551 	 must behave as if we were in a not-yet-leafified function.  */
8552 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8553 }
8554 else
8555 {
8556 /* We will emit the sibcall manually below, so we will need to
8557 manually spill non-leaf registers. */
8558 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8559
8560 /* We really are in a leaf function. */
8561 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8562 }
8563
8564   /* Find the "this" pointer.  It is normally in %o0, but under ARCH64,
8565      if the function returns a structure, the structure return pointer is there instead.  */
8566 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8567 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8568 else
8569 this = gen_rtx_REG (Pmode, int_arg_first);
8570
8571 /* Add DELTA. When possible use a plain add, otherwise load it into
8572 a register first. */
8573 if (delta)
8574 {
8575 rtx delta_rtx = GEN_INT (delta);
8576
8577 if (! SPARC_SIMM13_P (delta))
8578 {
8579 rtx scratch = gen_rtx_REG (Pmode, 1);
8580 emit_move_insn (scratch, delta_rtx);
8581 delta_rtx = scratch;
8582 }
8583
8584 /* THIS += DELTA. */
8585 emit_insn (gen_add2_insn (this, delta_rtx));
8586 }
8587
8588 /* Add the word at address (*THIS + VCALL_OFFSET). */
8589 if (vcall_offset)
8590 {
8591 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8592 rtx scratch = gen_rtx_REG (Pmode, 1);
8593
8594 if (vcall_offset >= 0)
8595 abort ();
8596
8597 /* SCRATCH = *THIS. */
8598 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8599
8600 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8601 may not have any available scratch register at this point. */
8602 if (SPARC_SIMM13_P (vcall_offset))
8603 ;
8604 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8605 else if (! fixed_regs[5]
8606 /* The below sequence is made up of at least 2 insns,
8607 while the default method may need only one. */
8608 && vcall_offset < -8192)
8609 {
8610 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8611 emit_move_insn (scratch2, vcall_offset_rtx);
8612 vcall_offset_rtx = scratch2;
8613 }
8614 else
8615 {
8616 rtx increment = GEN_INT (-4096);
8617
8618 	  /* VCALL_OFFSET is a negative number whose typical range can be
8619 	     estimated as -32768..0 in 32-bit mode.  In almost all cases
8620 	     it is therefore cheaper to emit multiple add insns than to
8621 	     spill and load the constant into a register (at least
8622 	     6 insns).  */
8623 while (! SPARC_SIMM13_P (vcall_offset))
8624 {
8625 emit_insn (gen_add2_insn (scratch, increment));
8626 vcall_offset += 4096;
8627 }
8628 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8629 }
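      /* Worked example: a VCALL_OFFSET of -20000 makes the loop above
	 emit four add insns of -4096, leaving -3616, which satisfies
	 SPARC_SIMM13_P and becomes the final addend.  */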
8630
8631 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8632 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8633 gen_rtx_PLUS (Pmode,
8634 scratch,
8635 vcall_offset_rtx)));
8636
8637 /* THIS += *(*THIS + VCALL_OFFSET). */
8638 emit_insn (gen_add2_insn (this, scratch));
8639 }
8640
8641 /* Generate a tail call to the target function. */
8642 if (! TREE_USED (function))
8643 {
8644 assemble_external (function);
8645 TREE_USED (function) = 1;
8646 }
8647 funexp = XEXP (DECL_RTL (function), 0);
8648
8649 if (flag_delayed_branch)
8650 {
8651 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8652 insn = emit_call_insn (gen_sibcall (funexp));
8653 SIBLING_CALL_P (insn) = 1;
8654 }
8655 else
8656 {
8657 /* The hoops we have to jump through in order to generate a sibcall
8658 without using delay slots... */
8659 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
8660
8661 if (flag_pic)
8662 {
8663 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8664 start_sequence ();
8665 load_pic_register (); /* clobbers %o7 */
8666 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8667 seq = get_insns ();
8668 end_sequence ();
8669 emit_and_preserve (seq, spill_reg);
8670 }
8671 else if (TARGET_ARCH32)
8672 {
8673 emit_insn (gen_rtx_SET (VOIDmode,
8674 scratch,
8675 gen_rtx_HIGH (SImode, funexp)));
8676 emit_insn (gen_rtx_SET (VOIDmode,
8677 scratch,
8678 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8679 }
8680 else /* TARGET_ARCH64 */
8681 {
8682 switch (sparc_cmodel)
8683 {
8684 case CM_MEDLOW:
8685 case CM_MEDMID:
8686 /* The destination can serve as a temporary. */
8687 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8688 break;
8689
8690 case CM_MEDANY:
8691 case CM_EMBMEDANY:
8692 /* The destination cannot serve as a temporary. */
8693 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8694 start_sequence ();
8695 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8696 seq = get_insns ();
8697 end_sequence ();
8698 emit_and_preserve (seq, spill_reg);
8699 break;
8700
8701 default:
8702 	      abort ();
8703 }
8704 }
8705
8706 emit_jump_insn (gen_indirect_jump (scratch));
8707 }
8708
8709 emit_barrier ();
8710
8711 /* Run just enough of rest_of_compilation to get the insns emitted.
8712 There's not really enough bulk here to make other passes such as
8713      instruction scheduling worthwhile.  Note that use_thunk calls
8714 assemble_start_function and assemble_end_function. */
8715 insn = get_insns ();
8716 insn_locators_initialize ();
8717 shorten_branches (insn);
8718 final_start_function (insn, file, 1);
8719 final (insn, file, 1, 0);
8720 final_end_function ();
8721
8722 reload_completed = 0;
8723 epilogue_completed = 0;
8724 no_new_pseudos = 0;
8725 }
8726
8727 /* Return true if sparc_output_mi_thunk would be able to output the
8728 assembler code for the thunk function specified by the arguments
8729 it is passed, and false otherwise. */
8730 static bool
8731 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8732 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8733 HOST_WIDE_INT vcall_offset,
8734 tree function ATTRIBUTE_UNUSED)
8735 {
8736 /* Bound the loop used in the default method above. */
8737 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8738 }
8739
8740 /* How to allocate a 'struct machine_function'. */
8741
8742 static struct machine_function *
8743 sparc_init_machine_status (void)
8744 {
8745 return ggc_alloc_cleared (sizeof (struct machine_function));
8746 }
8747
8748 /* Locate some local-dynamic symbol still in use by this function
8749 so that we can print its name in local-dynamic base patterns. */
8750
8751 static const char *
8752 get_some_local_dynamic_name (void)
8753 {
8754 rtx insn;
8755
8756 if (cfun->machine->some_ld_name)
8757 return cfun->machine->some_ld_name;
8758
8759 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8760 if (INSN_P (insn)
8761 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8762 return cfun->machine->some_ld_name;
8763
8764 abort ();
8765 }
8766
8767 static int
8768 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8769 {
8770 rtx x = *px;
8771
8772 if (x
8773 && GET_CODE (x) == SYMBOL_REF
8774 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8775 {
8776 cfun->machine->some_ld_name = XSTR (x, 0);
8777 return 1;
8778 }
8779
8780 return 0;
8781 }
8782
8783 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
8784 We need to emit DTP-relative relocations. */
8785
8786 void
8787 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8788 {
8789 switch (size)
8790 {
8791 case 4:
8792 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8793 break;
8794 case 8:
8795 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8796 break;
8797 default:
8798 abort ();
8799 }
8800 output_addr_const (file, x);
8801 fputs (")", file);
8802 }
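/* E.g. for SIZE == 4 and a symbol x, this prints

	.word	%r_tls_dtpoff32(x)

   which the assembler turns into a DTP-relative relocation.  */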
8803
8804 #include "gt-sparc.h"