/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
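
/* A note on units: COSTS_N_INSNS is the generic cost scale from rtl.h,
   so the entries above are expressed as "equivalent simple insns", not
   raw cycles.  The tables are consumed by the sparc_rtx_costs hook
   installed as TARGET_RTX_COSTS below.  An illustrative (not normative)
   fragment of such a consumer, using a hypothetical field name int_mul
   taken from the "imul" slot above, would be:

     case MULT:
       *total = sparc_costs->int_mul;   e.g. COSTS_N_INSNS (4) on supersparc
       return true;

   See sparc_rtx_costs for the actual implementation.  */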

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether anything branches between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
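
/* To make the mapping above concrete: SPARC integer registers are
   numbered %g0-%g7 = 0-7, %o0-%o7 = 8-15, %l0-%l7 = 16-23 and
   %i0-%i7 = 24-31.  A leaf function does not shift the register window,
   so each incoming register is renamed to its outgoing twin: %i0 (24)
   becomes %o0 (8), %i7 (31) becomes %o7 (15), %sp (14) stays put, and
   entries marked -1 (the locals, %fp, and the raw out registers) must
   not appear at all.  The float registers (32 and up) map to themselves.  */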

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
				tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_regs (void);
static void emit_restore_regs (void);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
				       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
				     enum machine_mode, tree, bool);
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Code model option as passed by user.  */
const char *sparc_cmodel_string;
/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};
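
/* Reading the table: the third and fourth fields are set_tune_p and
   set_arch_p, so for example -mcpu=ultrasparc selects both the
   scheduling model and the instruction set, while -mtune=ultrasparc
   only changes scheduling and leaves the ISA mask alone.  The table is
   walked in sparc_override_options below.  */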

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;
\f
/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

struct gcc_target targetm = TARGET_INITIALIZER;
\f
/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
	 ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS },
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS },
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit targets to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  if (! def->name)
    abort ();
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  Clear MASK_FPU_SET to avoid confusing
     the reverse mapping from switch values to names.  */
  if (TARGET_FPU_SET)
    {
      target_flags = (target_flags & ~MASK_FPU) | fpu;
      target_flags &= ~MASK_FPU_SET;
    }

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire a unique set number for our register saves and restores.  */
  sparc_sr_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    }
}
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

\f
/* Operand constraints.  */

/* Return nonzero only if OP is a register of mode MODE,
   or const0_rtx.  */

int
reg_or_0_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;
  if (op == const0_rtx)
    return 1;
  if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
      && CONST_DOUBLE_HIGH (op) == 0
      && CONST_DOUBLE_LOW (op) == 0)
    return 1;
  if (fp_zero_operand (op, mode))
    return 1;
  return 0;
}

/* Return nonzero only if OP is const1_rtx.  */

int
const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return op == const1_rtx;
}

/* Nonzero if OP is a floating point value with value 0.0.  */

int
fp_zero_operand (rtx op, enum machine_mode mode)
{
  if (GET_MODE_CLASS (GET_MODE (op)) != MODE_FLOAT)
    return 0;
  return op == CONST0_RTX (mode);
}

778

int
fp_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  return GET_CODE (op) == REG && SPARC_FP_REG_P (REGNO (op));
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SETHI_P (i))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (SPARC_SIMM13_P (i))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      if (REAL_VALUES_EQUAL (r, dconst0)
	  && ! REAL_VALUE_MINUS_ZERO (r))
	return 0;
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      if (! SPARC_SETHI_P (i)
	  && ! SPARC_SIMM13_P (i))
	return 1;
    }

  return 0;
}

/* Nonzero if OP is an integer register.  */

int
intreg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (register_operand (op, SImode)
	  || (TARGET_ARCH64 && register_operand (op, DImode)));
}

/* Nonzero if OP is a floating point condition code register.  */

int
fcc_reg_operand (rtx op, enum machine_mode mode)
{
  /* This can happen when recog is called from combine.  Op may be a MEM.
     Fail instead of calling abort in this case.  */
  if (GET_CODE (op) != REG)
    return 0;

  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (mode == VOIDmode
      && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
    return 0;

#if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first.  See gen_compare_reg().  */
  if (reg_renumber == 0)
    return REGNO (op) >= FIRST_PSEUDO_REGISTER;
  return REGNO_OK_FOR_CCFP_P (REGNO (op));
#else
  return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
#endif
}

/* Nonzero if OP is a floating point condition code fcc0 register.  */

int
fcc0_reg_operand (rtx op, enum machine_mode mode)
{
  /* This can happen when recog is called from combine.  Op may be a MEM.
     Fail instead of calling abort in this case.  */
  if (GET_CODE (op) != REG)
    return 0;

  if (mode != VOIDmode && mode != GET_MODE (op))
    return 0;
  if (mode == VOIDmode
      && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
    return 0;

  return REGNO (op) == SPARC_FCC_REG;
}

/* Nonzero if OP is an integer or floating point condition code register.  */

int
icc_or_fcc_reg_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
    {
      if (mode != VOIDmode && mode != GET_MODE (op))
	return 0;
      if (mode == VOIDmode
	  && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
	return 0;
      return 1;
    }

  return fcc_reg_operand (op, mode);
}

/* A call insn on SPARC can take either a PC-relative constant address
   or any regular memory address.  */

int
call_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != MEM)
    abort ();
  op = XEXP (op, 0);
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

int
call_operand_address (rtx op, enum machine_mode mode)
{
  return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
}

/* If OP is a SYMBOL_REF of a thread-local symbol, return its TLS mode,
   otherwise return 0.  */

int
tls_symbolic_operand (rtx op)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}

int
tgd_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_GLOBAL_DYNAMIC;
}

int
tld_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_DYNAMIC;
}

int
tie_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_INITIAL_EXEC;
}

int
tle_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return tls_symbolic_operand (op) == TLS_MODEL_LOCAL_EXEC;
}

/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  */

int
symbolic_operand (register rtx op, enum machine_mode mode)
{
  enum machine_mode omode = GET_MODE (op);

  if (omode != mode && omode != VOIDmode && mode != VOIDmode)
    return 0;

  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
      return !SYMBOL_REF_TLS_MODEL (op);

    case LABEL_REF:
      return 1;

    case CONST:
      op = XEXP (op, 0);
      return (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
		&& !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (op, 1)) == CONST_INT);

    default:
      return 0;
    }
}
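
/* For illustration, the forms accepted above (in RTL syntax) are:

     (symbol_ref "foo")                                non-TLS symbols only
     (label_ref 23)
     (const (plus (symbol_ref "foo") (const_int 8)))

   while e.g. a TLS symbol_ref or a (const (minus ...)) is rejected.  */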

/* Return truth value of statement that OP is a symbolic memory
   operand of mode MODE.  */

int
symbolic_memory_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) != MEM)
    return 0;
  op = XEXP (op, 0);
  return ((GET_CODE (op) == SYMBOL_REF && !SYMBOL_REF_TLS_MODEL (op))
	  || GET_CODE (op) == CONST || GET_CODE (op) == HIGH
	  || GET_CODE (op) == LABEL_REF);
}

/* Return truth value of statement that OP is a LABEL_REF of mode MODE.  */

int
label_ref_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) != LABEL_REF)
    return 0;
  if (GET_MODE (op) != mode)
    return 0;
  return 1;
}

/* Return 1 if the operand is an argument used in generating pic references
   in either the medium/low or medium/anywhere code models of sparc64.  */

int
sp64_medium_pic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* Check for (const (minus (symbol_ref:GOT)
			     (const (minus (label) (pc))))).  */
  if (GET_CODE (op) != CONST)
    return 0;
  op = XEXP (op, 0);
  if (GET_CODE (op) != MINUS)
    return 0;
  if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
    return 0;
  /* ??? Ensure symbol is GOT.  */
  if (GET_CODE (XEXP (op, 1)) != CONST)
    return 0;
  if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
    return 0;
  return 1;
}

/* Return 1 if the operand is a data segment reference.  This includes
   the readonly data segment, or in other words anything but the text segment.
   This is needed in the medium/anywhere code model on v9.  These values
   are accessed with EMBMEDANY_BASE_REG.  */

int
data_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF :
      return ! SYMBOL_REF_FUNCTION_P (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
	 Fall through.  */
    case CONST :
      return data_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is a text segment reference.
   This is needed in the medium/anywhere code model on v9.  */

int
text_segment_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case LABEL_REF :
      return 1;
    case SYMBOL_REF :
      return SYMBOL_REF_FUNCTION_P (op);
    case PLUS :
      /* Assume canonical format of symbol + constant.
	 Fall through.  */
    case CONST :
      return text_segment_operand (XEXP (op, 0), VOIDmode);
    default :
      return 0;
    }
}

/* Return 1 if the operand is either a register or a memory operand that is
   not symbolic.  */

int
reg_or_nonsymb_mem_operand (register rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;

  if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
    return 1;

  return 0;
}

int
splittable_symbolic_memory_operand (rtx op,
				    enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! symbolic_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

int
splittable_immediate_memory_operand (rtx op,
				     enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != MEM)
    return 0;
  if (! immediate_operand (XEXP (op, 0), Pmode))
    return 0;
  return 1;
}

/* Return truth value of whether OP is EQ or NE.  */

int
eq_or_neq (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
}

/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
   or LTU for non-floating-point.  We handle those specially.  */

int
normal_comp_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  if (GET_MODE (XEXP (op, 0)) == CCFPmode
      || GET_MODE (XEXP (op, 0)) == CCFPEmode)
    return 1;

  code = GET_CODE (op);
  return (code != NE && code != EQ && code != GEU && code != LTU);
}

/* Return 1 if this is a comparison operator.  This allows the use of
   MATCH_OPERATOR to recognize all the branch insns.  */

int
noov_compare_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode
      || GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
    /* These are the only branches which work with CC_NOOVmode.  */
    return (code == EQ || code == NE || code == GE || code == LT);
  return 1;
}

/* Return 1 if this is a 64-bit comparison operator.  This allows the use of
   MATCH_OPERATOR to recognize all the branch insns.  */

int
noov_compare64_op (register rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (! TARGET_V9)
    return 0;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  if (GET_MODE (XEXP (op, 0)) == CCX_NOOVmode)
    /* These are the only branches which work with CCX_NOOVmode.  */
    return (code == EQ || code == NE || code == GE || code == LT);
  return (GET_MODE (XEXP (op, 0)) == CCXmode);
}

/* Nonzero if OP is a comparison operator suitable for use in v9
   conditional move or branch on register contents instructions.  */

int
v9_regcmp_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  enum rtx_code code;

  if (!COMPARISON_P (op))
    return 0;

  code = GET_CODE (op);
  return v9_regcmp_p (code);
}

/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation.  */

int
extend_op (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
}

/* Return nonzero if OP is an operator of mode MODE which can set
   the condition codes explicitly.  We do not include PLUS and MINUS
   because these require CC_NOOVmode, which we handle explicitly.  */

int
cc_arithop (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == AND
      || GET_CODE (op) == IOR
      || GET_CODE (op) == XOR)
    return 1;

  return 0;
}

/* Return nonzero if OP is an operator of mode MODE which can bitwise
   complement its second operand and set the condition codes explicitly.  */

int
cc_arithopn (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* XOR is not here because combine canonicalizes (xor (not ...) ...)
     and (xor ... (not ...)) to (not (xor ...)).  */
  return (GET_CODE (op) == AND
	  || GET_CODE (op) == IOR);
}
\f
/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 13 bit immediate field.  This is an acceptable SImode operand for
   most 3 address instructions.  */

int
arith_operand (rtx op, enum machine_mode mode)
{
  if (register_operand (op, mode))
    return 1;
  if (GET_CODE (op) != CONST_INT)
    return 0;
  return SMALL_INT32 (op);
}
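
/* For reference: the 13-bit signed immediate field of SPARC arithmetic
   instructions covers -4096 through 4095, which is what the
   SMALL_INT/SPARC_SIMM13_P style of range check expresses, typically
   via an unsigned trick along the lines of

     (unsigned HOST_WIDE_INT) (val + 0x1000) < 0x2000

   (compare the CONST_DOUBLE tests in arith_double_operand below).  */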

/* Return true if OP is a constant 4096.  */

int
arith_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != CONST_INT)
    return 0;
  else
    return INTVAL (op) == 4096;
}

/* Return true if OP is suitable as the second operand for add/sub.  */

int
arith_add_operand (rtx op, enum machine_mode mode)
{
  return arith_operand (op, mode) || arith_4096_operand (op, mode);
}

/* Return true if OP is a CONST_INT or a CONST_DOUBLE which can fit in the
   immediate field of OR and XOR instructions.  Used for 64-bit
   constant formation patterns.  */
int
const64_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT
	   && SPARC_SIMM13_P (INTVAL (op)))
#if HOST_BITS_PER_WIDE_INT != 64
	  || (GET_CODE (op) == CONST_DOUBLE
	      && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
	      && (CONST_DOUBLE_HIGH (op) ==
		  ((CONST_DOUBLE_LOW (op) & 0x80000000) != 0 ?
		   (HOST_WIDE_INT)-1 : 0)))
#endif
	  );
}

/* The same, but only for sethi instructions.  */
int
const64_high_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT
	   && (INTVAL (op) & ~(HOST_WIDE_INT)0x3ff) != 0
	   && SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
	   )
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && (CONST_DOUBLE_LOW (op) & ~(HOST_WIDE_INT)0x3ff) != 0
	      && SPARC_SETHI_P (CONST_DOUBLE_LOW (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 11 bit immediate field.  This is an acceptable SImode operand for
   the movcc instructions.  */

int
arith11_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
}

/* Return true if OP is a register, or is a CONST_INT that can fit in a
   signed 10 bit immediate field.  This is an acceptable SImode operand for
   the movrcc instructions.  */

int
arith10_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
}

/* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
   immediate field, or is a CONST_DOUBLE whose both parts fit in a 13 bit
   immediate field.
   ARCH64: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE
   that can fit in a 13 bit immediate field.  This is an acceptable DImode
   operand for most 3 address instructions.  */

int
arith_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
	  || (! TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
	  || (TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
}

/* Return true if OP is a constant 4096 for DImode on ARCH64.  */

int
arith_double_4096_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (TARGET_ARCH64 &&
	  ((GET_CODE (op) == CONST_INT && INTVAL (op) == 4096) ||
	   (GET_CODE (op) == CONST_DOUBLE &&
	    CONST_DOUBLE_LOW (op) == 4096 &&
	    CONST_DOUBLE_HIGH (op) == 0)));
}

/* Return true if OP is suitable as the second operand for add/sub
   in DImode.  */

int
arith_double_add_operand (rtx op, enum machine_mode mode)
{
  return arith_double_operand (op, mode) || arith_double_4096_operand (op, mode);
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in an 11 bit immediate field.  This is an acceptable DImode
   operand for the movcc instructions.  */
/* ??? Replace with arith11_operand?  */

int
arith11_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
	  || (GET_CODE (op) == CONST_INT
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
}

/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
   can fit in a 10 bit immediate field.  This is an acceptable DImode
   operand for the movrcc instructions.  */
/* ??? Replace with arith10_operand?  */

int
arith10_double_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
	      && ((CONST_DOUBLE_HIGH (op) == -1
		   && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
		  || (CONST_DOUBLE_HIGH (op) == 0
		      && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
	  || (GET_CODE (op) == CONST_INT
	      && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
	      && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
}

/* Return truth value of whether OP is an integer which fits the
   range constraining immediate operands in most three-address insns,
   which have a 13 bit immediate field.  */

int
small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && SMALL_INT (op));
}

int
small_int_or_double (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))));
}

/* Recognize operand values for the umul instruction.  That instruction sign
   extends immediate values just like all other sparc instructions, but
   interprets the extended result as an unsigned number.  */

int
uns_small_int (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if HOST_BITS_PER_WIDE_INT > 32
  /* All allowed constants will fit a CONST_INT.  */
  return (GET_CODE (op) == CONST_INT
	  && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
	      || (INTVAL (op) >= 0xFFFFF000
		  && INTVAL (op) <= 0xFFFFFFFF)));
#else
  return ((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
	  || (GET_CODE (op) == CONST_DOUBLE
	      && CONST_DOUBLE_HIGH (op) == 0
	      && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000));
#endif
}
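
/* Worked example for the comment above: the constant 0xFFFFF000 is
   encoded in the 13-bit field as -4096; the hardware sign-extends that
   to the 32-bit pattern 0xFFFFF000, which umul then interprets as the
   unsigned value 4294963200.  Hence the two accepted ranges,
   0..0xFFF and 0xFFFFF000..0xFFFFFFFF.  */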

int
uns_arith_operand (rtx op, enum machine_mode mode)
{
  return register_operand (op, mode) || uns_small_int (op, mode);
}

/* Return truth value of statement that OP is a call-clobbered register.  */
int
clobbered_register (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
}

/* Return 1 if OP is a valid operand for the source of a move insn.  */

int
input_operand (rtx op, enum machine_mode mode)
{
  /* If both modes are non-void they must be the same.  */
  if (mode != VOIDmode && GET_MODE (op) != VOIDmode && mode != GET_MODE (op))
    return 0;

  /* Allow any one instruction integer constant, and all CONST_INT
     variants when we are working in DImode and !arch64.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && ((GET_CODE (op) == CONST_INT
	   && (SPARC_SETHI_P (INTVAL (op) & GET_MODE_MASK (mode))
	       || SPARC_SIMM13_P (INTVAL (op))
	       || (mode == DImode
		   && ! TARGET_ARCH64)))
	  || (TARGET_ARCH64
	      && GET_CODE (op) == CONST_DOUBLE
	      && ((CONST_DOUBLE_HIGH (op) == 0
		   && SPARC_SETHI_P (CONST_DOUBLE_LOW (op)))
		  ||
#if HOST_BITS_PER_WIDE_INT == 64
		  (CONST_DOUBLE_HIGH (op) == 0
		   && SPARC_SIMM13_P (CONST_DOUBLE_LOW (op)))
#else
		  (SPARC_SIMM13_P (CONST_DOUBLE_LOW (op))
		   && (((CONST_DOUBLE_LOW (op) & 0x80000000) == 0
			&& CONST_DOUBLE_HIGH (op) == 0)
		       || (CONST_DOUBLE_HIGH (op) == -1
			   && CONST_DOUBLE_LOW (op) & 0x80000000) != 0))
#endif
		  ))))
    return 1;

  /* If !arch64 and this is a DImode const, allow it so that
     the splits can be generated.  */
  if (! TARGET_ARCH64
      && mode == DImode
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  if (register_operand (op, mode))
    return 1;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (op) == CONST_DOUBLE)
    return 1;

  /* If this is a SUBREG, look inside so that we handle
     paradoxical ones.  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);

  /* Check for valid MEM forms.  */
  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));

  return 0;
}

/* Return 1 if OP is valid for the lhs of a compare insn.  */

int
compare_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == ZERO_EXTRACT)
    return (register_operand (XEXP (op, 0), mode)
	    && small_int_or_double (XEXP (op, 1), mode)
	    && small_int_or_double (XEXP (op, 2), mode)
	    /* This matches cmp_zero_extract.  */
	    && ((mode == SImode
		 && ((GET_CODE (XEXP (op, 2)) == CONST_INT
		      && INTVAL (XEXP (op, 2)) > 19)
		     || (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
			 && CONST_DOUBLE_LOW (XEXP (op, 2)) > 19)))
		/* This matches cmp_zero_extract_sp64.  */
		|| (mode == DImode
		    && TARGET_ARCH64
		    && ((GET_CODE (XEXP (op, 2)) == CONST_INT
			 && INTVAL (XEXP (op, 2)) > 51)
			|| (GET_CODE (XEXP (op, 2)) == CONST_DOUBLE
			    && CONST_DOUBLE_LOW (XEXP (op, 2)) > 51)))));
  else
    return register_operand (op, mode);
}

\f
/* We know it can't be done in one insn when we get here; the movsi
   expander guarantees this.  */
void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (op1);

      if (SPARC_SETHI_P (value & GET_MODE_MASK (mode))
	  || SPARC_SIMM13_P (value))
	abort ();
    }

  /* Full 2-insn decomposition is needed.  */
  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      if (TARGET_ARCH64
	  && HOST_BITS_PER_WIDE_INT != 64
	  && (INTVAL (op1) & 0x80000000) != 0)
	emit_insn (gen_rtx_SET
		   (VOIDmode, temp,
		    immed_double_const (INTVAL (op1) & ~(HOST_WIDE_INT)0x3ff,
					0, DImode)));
      else
	emit_insn (gen_rtx_SET (VOIDmode, temp,
				GEN_INT (INTVAL (op1)
					 & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
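
/* For the record, the two-insn decomposition generated above corresponds
   to the classic SPARC idiom for building a 32-bit constant, e.g. for
   0x12345678:

     sethi	%hi(0x12345678), %temp	! %temp = 0x12345400 (upper 22 bits)
     or		%temp, %lo(0x12345678), %reg

   where %hi/%lo is the 22/10-bit split that SPARC_SETHI_P and the
   0x3ff masks above implement.  */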
1639
1640 \f
1641 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1642 If TEMP is nonzero, we are forbidden to use any other scratch
1643 registers. Otherwise, we are allowed to generate them as needed.
1644
1645 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1646 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1647 void
1648 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1649 {
1650 rtx temp1, temp2, temp3, temp4, temp5;
1651 rtx ti_temp = 0;
1652
1653 if (temp && GET_MODE (temp) == TImode)
1654 {
1655 ti_temp = temp;
1656 temp = gen_rtx_REG (DImode, REGNO (temp));
1657 }
1658
1659 /* SPARC-V9 code-model support. */
1660 switch (sparc_cmodel)
1661 {
1662 case CM_MEDLOW:
1663 /* The range spanned by all instructions in the object is less
1664 than 2^31 bytes (2GB) and the distance from any instruction
1665 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1666 than 2^31 bytes (2GB).
1667
1668 The executable must be in the low 4TB of the virtual address
1669 space.
1670
1671 sethi %hi(symbol), %temp1
1672 or %temp1, %lo(symbol), %reg */
1673 if (temp)
1674 temp1 = temp; /* op0 is allowed. */
1675 else
1676 temp1 = gen_reg_rtx (DImode);
1677
1678 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1679 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1680 break;
1681
1682 case CM_MEDMID:
1683 /* The range spanned by all instructions in the object is less
1684 than 2^31 bytes (2GB) and the distance from any instruction
1685 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1686 than 2^31 bytes (2GB).
1687
1688 The executable must be in the low 16TB of the virtual address
1689 space.
1690
1691 sethi %h44(symbol), %temp1
1692 or %temp1, %m44(symbol), %temp2
1693 sllx %temp2, 12, %temp3
1694 or %temp3, %l44(symbol), %reg */
1695 if (temp)
1696 {
1697 temp1 = op0;
1698 temp2 = op0;
1699 temp3 = temp; /* op0 is allowed. */
1700 }
1701 else
1702 {
1703 temp1 = gen_reg_rtx (DImode);
1704 temp2 = gen_reg_rtx (DImode);
1705 temp3 = gen_reg_rtx (DImode);
1706 }
1707
1708 emit_insn (gen_seth44 (temp1, op1));
1709 emit_insn (gen_setm44 (temp2, temp1, op1));
1710 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1711 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1712 emit_insn (gen_setl44 (op0, temp3, op1));
1713 break;
1714
1715 case CM_MEDANY:
1716 /* The range spanned by all instructions in the object is less
1717 than 2^31 bytes (2GB) and the distance from any instruction
1718 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1719 than 2^31 bytes (2GB).
1720
1721 The executable can be placed anywhere in the virtual address
1722 space.
1723
1724 sethi %hh(symbol), %temp1
1725 sethi %lm(symbol), %temp2
1726 or %temp1, %hm(symbol), %temp3
1727 sllx %temp3, 32, %temp4
1728 or %temp4, %temp2, %temp5
1729 or %temp5, %lo(symbol), %reg */
1730 if (temp)
1731 {
1732 /* It is possible that one of the registers we got for operands[2]
1733 might coincide with that of operands[0] (which is why we made
1734 it TImode). Pick the other one to use as our scratch. */
1735 if (rtx_equal_p (temp, op0))
1736 {
1737 if (ti_temp)
1738 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1739 else
1740 abort();
1741 }
1742 temp1 = op0;
1743 temp2 = temp; /* op0 is _not_ allowed, see above. */
1744 temp3 = op0;
1745 temp4 = op0;
1746 temp5 = op0;
1747 }
1748 else
1749 {
1750 temp1 = gen_reg_rtx (DImode);
1751 temp2 = gen_reg_rtx (DImode);
1752 temp3 = gen_reg_rtx (DImode);
1753 temp4 = gen_reg_rtx (DImode);
1754 temp5 = gen_reg_rtx (DImode);
1755 }
1756
1757 emit_insn (gen_sethh (temp1, op1));
1758 emit_insn (gen_setlm (temp2, op1));
1759 emit_insn (gen_sethm (temp3, temp1, op1));
1760 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1761 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1762 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1763 gen_rtx_PLUS (DImode, temp4, temp2)));
1764 emit_insn (gen_setlo (op0, temp5, op1));
1765 break;
1766
1767 case CM_EMBMEDANY:
1768 /* Old old old backwards compatibility kruft here.
1769 Essentially it is MEDLOW with a fixed 64-bit
1770 virtual base added to all data segment addresses.
1771 Text-segment stuff is computed like MEDANY, we can't
1772 reuse the code above because the relocation knobs
1773 look different.
1774
1775 Data segment: sethi %hi(symbol), %temp1
1776 add %temp1, EMBMEDANY_BASE_REG, %temp2
1777 or %temp2, %lo(symbol), %reg */
1778 if (data_segment_operand (op1, GET_MODE (op1)))
1779 {
1780 if (temp)
1781 {
1782 temp1 = temp; /* op0 is allowed. */
1783 temp2 = op0;
1784 }
1785 else
1786 {
1787 temp1 = gen_reg_rtx (DImode);
1788 temp2 = gen_reg_rtx (DImode);
1789 }
1790
1791 emit_insn (gen_embmedany_sethi (temp1, op1));
1792 emit_insn (gen_embmedany_brsum (temp2, temp1));
1793 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1794 }
1795
1796 /* Text segment: sethi %uhi(symbol), %temp1
1797 sethi %hi(symbol), %temp2
1798 or %temp1, %ulo(symbol), %temp3
1799 sllx %temp3, 32, %temp4
1800 or %temp4, %temp2, %temp5
1801 or %temp5, %lo(symbol), %reg */
1802 else
1803 {
1804 if (temp)
1805 {
1806 /* It is possible that one of the registers we got for operands[2]
1807 might coincide with that of operands[0] (which is why we made
1808 it TImode). Pick the other one to use as our scratch. */
1809 if (rtx_equal_p (temp, op0))
1810 {
1811 if (ti_temp)
1812 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1813 else
1814 abort ();
1815 }
1816 temp1 = op0;
1817 temp2 = temp; /* op0 is _not_ allowed, see above. */
1818 temp3 = op0;
1819 temp4 = op0;
1820 temp5 = op0;
1821 }
1822 else
1823 {
1824 temp1 = gen_reg_rtx (DImode);
1825 temp2 = gen_reg_rtx (DImode);
1826 temp3 = gen_reg_rtx (DImode);
1827 temp4 = gen_reg_rtx (DImode);
1828 temp5 = gen_reg_rtx (DImode);
1829 }
1830
1831 emit_insn (gen_embmedany_textuhi (temp1, op1));
1832 emit_insn (gen_embmedany_texthi (temp2, op1));
1833 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1834 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1835 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1836 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1837 gen_rtx_PLUS (DImode, temp4, temp2)));
1838 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1839 }
1840 break;
1841
1842 default:
1843 abort ();
1844 }
1845 }
1846
1847 /* These avoid problems when cross-compiling. If we do not
1848 go through all this hair then the optimizer will see
1849 invalid REG_EQUAL notes, or in some cases no notes at all. */
1850 static void sparc_emit_set_safe_HIGH64 (rtx, HOST_WIDE_INT);
1851 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1852 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1853 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1854
1855 #if HOST_BITS_PER_WIDE_INT == 64
1856 #define GEN_HIGHINT64(__x) GEN_INT ((__x) & ~(HOST_WIDE_INT)0x3ff)
1857 #define GEN_INT64(__x) GEN_INT (__x)
1858 #else
1859 #define GEN_HIGHINT64(__x) \
1860 immed_double_const ((__x) & ~(HOST_WIDE_INT)0x3ff, 0, DImode)
1861 #define GEN_INT64(__x) \
1862 immed_double_const ((__x) & 0xffffffff, \
1863 ((__x) & 0x80000000 ? -1 : 0), DImode)
1864 #endif
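/* Editor's note: a minimal worked example of the two macros above,
   assuming a 64-bit host ("val" is a hypothetical value, not taken
   from this file).  GEN_HIGHINT64 clears the low 10 bits, which are
   exactly the bits a HIGH leaves unspecified.  */
#if 0
static void
example_gen_highint64 (void)
{
  unsigned long long val = 0xdeadbeefULL;
  /* GEN_HIGHINT64 (val) wraps 0xdeadbc00; GEN_INT64 (val) wraps
     0xdeadbeef itself.  */
  if ((val & ~(unsigned long long) 0x3ff) != 0xdeadbc00ULL)
    abort ();
}
#endif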
1865
1866 /* The optimizer is not allowed to assume anything about exactly
1867 which bits are set for a HIGH; they are unspecified.
1868 Unfortunately this leads to many missed optimizations
1869 during CSE. We mask out the non-HIGH bits so that the
1870 result matches a plain movdi, to alleviate this problem. */
1871 static void
1872 sparc_emit_set_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1873 {
1874 emit_insn (gen_rtx_SET (VOIDmode, dest, GEN_HIGHINT64 (val)));
1875 }
1876
1877 static rtx
1878 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1879 {
1880 return gen_rtx_SET (VOIDmode, dest, GEN_INT64 (val));
1881 }
1882
1883 static rtx
1884 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1885 {
1886 return gen_rtx_IOR (DImode, src, GEN_INT64 (val));
1887 }
1888
1889 static rtx
1890 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1891 {
1892 return gen_rtx_XOR (DImode, src, GEN_INT64 (val));
1893 }
1894
1895 /* Worker routines for 64-bit constant formation on arch64.
1896 A key goal in these emissions is to create as many
1897 temporary REGs as possible. This makes it possible
1898 for half-built constants to be reused later when
1899 similar values are required again. Without doing
1900 this, the optimizer cannot see such
1901 opportunities. */
1902
1903 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1904 unsigned HOST_WIDE_INT, int);
1905
1906 static void
1907 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1908 unsigned HOST_WIDE_INT low_bits, int is_neg)
1909 {
1910 unsigned HOST_WIDE_INT high_bits;
1911
1912 if (is_neg)
1913 high_bits = (~low_bits) & 0xffffffff;
1914 else
1915 high_bits = low_bits;
1916
1917 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1918 if (!is_neg)
1919 {
1920 emit_insn (gen_rtx_SET (VOIDmode, op0,
1921 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1922 }
1923 else
1924 {
1925 /* If we are XOR'ing with -1, then we should emit a one's complement
1926 instead. This way the combiner will notice logical operations
1927 such as ANDN later on and substitute. */
1928 if ((low_bits & 0x3ff) == 0x3ff)
1929 {
1930 emit_insn (gen_rtx_SET (VOIDmode, op0,
1931 gen_rtx_NOT (DImode, temp)));
1932 }
1933 else
1934 {
1935 emit_insn (gen_rtx_SET (VOIDmode, op0,
1936 gen_safe_XOR64 (temp,
1937 (-(HOST_WIDE_INT)0x400
1938 | (low_bits & 0x3ff)))));
1939 }
1940 }
1941 }
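/* Editor's illustration of the is_neg path above, for the
   (hypothetical) constant 0xfffffffffffff123, i.e.
   low_bits == 0xfffff123:

     sethi %hi(0xedc), %temp    ! ~low_bits; %temp = 0x0000000000000c00
     xor   %temp, -733, %reg    ! -733 == -0x400 | 0x123

   giving %reg == 0xfffffffffffff123 with the high word properly
   sign-extended.  */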
1942
1943 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1944 unsigned HOST_WIDE_INT, int);
1945
1946 static void
1947 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1948 unsigned HOST_WIDE_INT high_bits,
1949 unsigned HOST_WIDE_INT low_immediate,
1950 int shift_count)
1951 {
1952 rtx temp2 = op0;
1953
1954 if ((high_bits & 0xfffffc00) != 0)
1955 {
1956 sparc_emit_set_safe_HIGH64 (temp, high_bits);
1957 if ((high_bits & ~0xfffffc00) != 0)
1958 emit_insn (gen_rtx_SET (VOIDmode, op0,
1959 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1960 else
1961 temp2 = temp;
1962 }
1963 else
1964 {
1965 emit_insn (gen_safe_SET64 (temp, high_bits));
1966 temp2 = temp;
1967 }
1968
1969 /* Now shift it up into place. */
1970 emit_insn (gen_rtx_SET (VOIDmode, op0,
1971 gen_rtx_ASHIFT (DImode, temp2,
1972 GEN_INT (shift_count))));
1973
1974 /* If there is a low immediate part piece, finish up by
1975 putting that in as well. */
1976 if (low_immediate != 0)
1977 emit_insn (gen_rtx_SET (VOIDmode, op0,
1978 gen_safe_OR64 (op0, low_immediate)));
1979 }
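/* Editor's illustration: building the (hypothetical) constant
   0x1234567800000abc via quick2 with high_bits == 0x12345678,
   low_immediate == 0xabc and shift_count == 32:

     sethi %hi(0x12345678), %temp
     or    %temp, 0x278, %op0   ! high_bits & 0x3ff
     sllx  %op0, 32, %op0
     or    %op0, 0xabc, %op0  */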
1980
1981 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1982 unsigned HOST_WIDE_INT);
1983
1984 /* Full 64-bit constant decomposition. Even though this is the
1985 'worst' case, we still optimize a few things away. */
1986 static void
1987 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1988 unsigned HOST_WIDE_INT high_bits,
1989 unsigned HOST_WIDE_INT low_bits)
1990 {
1991 rtx sub_temp;
1992
1993 if (reload_in_progress || reload_completed)
1994 sub_temp = op0;
1995 else
1996 sub_temp = gen_reg_rtx (DImode);
1997
1998 if ((high_bits & 0xfffffc00) != 0)
1999 {
2000 sparc_emit_set_safe_HIGH64 (temp, high_bits);
2001 if ((high_bits & ~0xfffffc00) != 0)
2002 emit_insn (gen_rtx_SET (VOIDmode,
2003 sub_temp,
2004 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
2005 else
2006 sub_temp = temp;
2007 }
2008 else
2009 {
2010 emit_insn (gen_safe_SET64 (temp, high_bits));
2011 sub_temp = temp;
2012 }
2013
2014 if (!reload_in_progress && !reload_completed)
2015 {
2016 rtx temp2 = gen_reg_rtx (DImode);
2017 rtx temp3 = gen_reg_rtx (DImode);
2018 rtx temp4 = gen_reg_rtx (DImode);
2019
2020 emit_insn (gen_rtx_SET (VOIDmode, temp4,
2021 gen_rtx_ASHIFT (DImode, sub_temp,
2022 GEN_INT (32))));
2023
2024 sparc_emit_set_safe_HIGH64 (temp2, low_bits);
2025 if ((low_bits & ~0xfffffc00) != 0)
2026 {
2027 emit_insn (gen_rtx_SET (VOIDmode, temp3,
2028 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
2029 emit_insn (gen_rtx_SET (VOIDmode, op0,
2030 gen_rtx_PLUS (DImode, temp4, temp3)));
2031 }
2032 else
2033 {
2034 emit_insn (gen_rtx_SET (VOIDmode, op0,
2035 gen_rtx_PLUS (DImode, temp4, temp2)));
2036 }
2037 }
2038 else
2039 {
2040 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
2041 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
2042 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
2043 int to_shift = 12;
2044
2045 /* We are in the middle of reload, so this is really
2046 painful. However we do still make an attempt to
2047 avoid emitting truly stupid code. */
2048 if (low1 != const0_rtx)
2049 {
2050 emit_insn (gen_rtx_SET (VOIDmode, op0,
2051 gen_rtx_ASHIFT (DImode, sub_temp,
2052 GEN_INT (to_shift))));
2053 emit_insn (gen_rtx_SET (VOIDmode, op0,
2054 gen_rtx_IOR (DImode, op0, low1)));
2055 sub_temp = op0;
2056 to_shift = 12;
2057 }
2058 else
2059 {
2060 to_shift += 12;
2061 }
2062 if (low2 != const0_rtx)
2063 {
2064 emit_insn (gen_rtx_SET (VOIDmode, op0,
2065 gen_rtx_ASHIFT (DImode, sub_temp,
2066 GEN_INT (to_shift))));
2067 emit_insn (gen_rtx_SET (VOIDmode, op0,
2068 gen_rtx_IOR (DImode, op0, low2)));
2069 sub_temp = op0;
2070 to_shift = 8;
2071 }
2072 else
2073 {
2074 to_shift += 8;
2075 }
2076 emit_insn (gen_rtx_SET (VOIDmode, op0,
2077 gen_rtx_ASHIFT (DImode, sub_temp,
2078 GEN_INT (to_shift))));
2079 if (low3 != const0_rtx)
2080 emit_insn (gen_rtx_SET (VOIDmode, op0,
2081 gen_rtx_IOR (DImode, op0, low3)));
2082 /* phew... */
2083 }
2084 }
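/* Editor's sketch, in plain C, of the reload-time 12/12/8 split
   above; low_bits = 0x89abcdef and the high part are hypothetical
   example values.  */
#if 0
static void
example_longway_reload_split (void)
{
  unsigned long long low_bits = 0x89abcdefULL;
  unsigned long long low1 = (low_bits >> 20) & 0xfff;   /* 0x89a */
  unsigned long long low2 = (low_bits >> 8) & 0xfff;    /* 0xbcd */
  unsigned long long low3 = low_bits & 0xff;            /* 0xef  */
  unsigned long long high = 0x12345678ULL;  /* already-built high part */
  unsigned long long v;

  /* Three shift/or pairs: 12 + 12 + 8 == 32 bits in total.  */
  v = ((((((high << 12) | low1) << 12) | low2) << 8) | low3);
  if (v != ((high << 32) | low_bits))
    abort ();
}
#endif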
2085
2086 /* Analyze a 64-bit constant for certain properties. */
2087 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
2088 unsigned HOST_WIDE_INT,
2089 int *, int *, int *);
2090
2091 static void
2092 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
2093 unsigned HOST_WIDE_INT low_bits,
2094 int *hbsp, int *lbsp, int *abbasp)
2095 {
2096 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
2097 int i;
2098
2099 lowest_bit_set = highest_bit_set = -1;
2100 i = 0;
2101 do
2102 {
2103 if ((lowest_bit_set == -1)
2104 && ((low_bits >> i) & 1))
2105 lowest_bit_set = i;
2106 if ((highest_bit_set == -1)
2107 && ((high_bits >> (32 - i - 1)) & 1))
2108 highest_bit_set = (64 - i - 1);
2109 }
2110 while (++i < 32
2111 && ((highest_bit_set == -1)
2112 || (lowest_bit_set == -1)));
2113 if (i == 32)
2114 {
2115 i = 0;
2116 do
2117 {
2118 if ((lowest_bit_set == -1)
2119 && ((high_bits >> i) & 1))
2120 lowest_bit_set = i + 32;
2121 if ((highest_bit_set == -1)
2122 && ((low_bits >> (32 - i - 1)) & 1))
2123 highest_bit_set = 32 - i - 1;
2124 }
2125 while (++i < 32
2126 && ((highest_bit_set == -1)
2127 || (lowest_bit_set == -1)));
2128 }
2129 /* If there are no bits set this should have gone out
2130 as one instruction! */
2131 if (lowest_bit_set == -1
2132 || highest_bit_set == -1)
2133 abort ();
2134 all_bits_between_are_set = 1;
2135 for (i = lowest_bit_set; i <= highest_bit_set; i++)
2136 {
2137 if (i < 32)
2138 {
2139 if ((low_bits & (1 << i)) != 0)
2140 continue;
2141 }
2142 else
2143 {
2144 if ((high_bits & (1 << (i - 32))) != 0)
2145 continue;
2146 }
2147 all_bits_between_are_set = 0;
2148 break;
2149 }
2150 *hbsp = highest_bit_set;
2151 *lbsp = lowest_bit_set;
2152 *abbasp = all_bits_between_are_set;
2153 }
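/* Editor's worked example for the routine above; the constant is
   hypothetical.  */
#if 0
static void
example_analyze_64bit_constant (void)
{
  int hbs, lbs, abbas;
  /* high_bits = 0x3fc, low_bits = 0 picks out bits 34..41 of the
     64-bit value 0x000003fc00000000.  */
  analyze_64bit_constant (0x3fc, 0, &hbs, &lbs, &abbas);
  /* Expect hbs == 41, lbs == 34, abbas == 1: such a value can be
     built with a single mov+sllx pair (see sparc_emit_set_const64).  */
}
#endif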
2154
2155 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
2156
2157 static int
2158 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
2159 unsigned HOST_WIDE_INT low_bits)
2160 {
2161 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
2162
2163 if (high_bits == 0
2164 || high_bits == 0xffffffff)
2165 return 1;
2166
2167 analyze_64bit_constant (high_bits, low_bits,
2168 &highest_bit_set, &lowest_bit_set,
2169 &all_bits_between_are_set);
2170
2171 if ((highest_bit_set == 63
2172 || lowest_bit_set == 0)
2173 && all_bits_between_are_set != 0)
2174 return 1;
2175
2176 if ((highest_bit_set - lowest_bit_set) < 21)
2177 return 1;
2178
2179 return 0;
2180 }
2181
2182 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
2183 unsigned HOST_WIDE_INT,
2184 int, int);
2185
2186 static unsigned HOST_WIDE_INT
2187 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
2188 unsigned HOST_WIDE_INT low_bits,
2189 int lowest_bit_set, int shift)
2190 {
2191 HOST_WIDE_INT hi, lo;
2192
2193 if (lowest_bit_set < 32)
2194 {
2195 lo = (low_bits >> lowest_bit_set) << shift;
2196 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
2197 }
2198 else
2199 {
2200 lo = 0;
2201 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
2202 }
2203 if (hi & lo)
2204 abort ();
2205 return (hi | lo);
2206 }
2207
2208 /* Here we are sure to be arch64 and this is an integer constant
2209 being loaded into a register. Emit the most efficient
2210 insn sequence possible. Detection of all the 1-insn cases
2211 has been done already. */
2212 void
2213 sparc_emit_set_const64 (rtx op0, rtx op1)
2214 {
2215 unsigned HOST_WIDE_INT high_bits, low_bits;
2216 int lowest_bit_set, highest_bit_set;
2217 int all_bits_between_are_set;
2218 rtx temp = 0;
2219
2220 /* Sanity check that we know what we are working with. */
2221 if (! TARGET_ARCH64)
2222 abort ();
2223
2224 if (GET_CODE (op0) != SUBREG)
2225 {
2226 if (GET_CODE (op0) != REG
2227 || (REGNO (op0) >= SPARC_FIRST_FP_REG
2228 && REGNO (op0) <= SPARC_LAST_V9_FP_REG))
2229 abort ();
2230 }
2231
2232 if (reload_in_progress || reload_completed)
2233 temp = op0;
2234
2235 if (GET_CODE (op1) != CONST_DOUBLE
2236 && GET_CODE (op1) != CONST_INT)
2237 {
2238 sparc_emit_set_symbolic_const64 (op0, op1, temp);
2239 return;
2240 }
2241
2242 if (! temp)
2243 temp = gen_reg_rtx (DImode);
2244
2245 if (GET_CODE (op1) == CONST_DOUBLE)
2246 {
2247 #if HOST_BITS_PER_WIDE_INT == 64
2248 high_bits = (CONST_DOUBLE_LOW (op1) >> 32) & 0xffffffff;
2249 low_bits = CONST_DOUBLE_LOW (op1) & 0xffffffff;
2250 #else
2251 high_bits = CONST_DOUBLE_HIGH (op1);
2252 low_bits = CONST_DOUBLE_LOW (op1);
2253 #endif
2254 }
2255 else
2256 {
2257 #if HOST_BITS_PER_WIDE_INT == 64
2258 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
2259 low_bits = (INTVAL (op1) & 0xffffffff);
2260 #else
2261 high_bits = ((INTVAL (op1) < 0) ?
2262 0xffffffff :
2263 0x00000000);
2264 low_bits = INTVAL (op1);
2265 #endif
2266 }
2267
2268 /* low_bits bits 0 --> 31
2269 high_bits bits 32 --> 63 */
2270
2271 analyze_64bit_constant (high_bits, low_bits,
2272 &highest_bit_set, &lowest_bit_set,
2273 &all_bits_between_are_set);
2274
2275 /* First try for a 2-insn sequence. */
2276
2277 /* These situations are preferred because the optimizer can
2278 * do more things with them:
2279 * 1) mov -1, %reg
2280 * sllx %reg, shift, %reg
2281 * 2) mov -1, %reg
2282 * srlx %reg, shift, %reg
2283 * 3) mov some_small_const, %reg
2284 * sllx %reg, shift, %reg
2285 */
2286 if (((highest_bit_set == 63
2287 || lowest_bit_set == 0)
2288 && all_bits_between_are_set != 0)
2289 || ((highest_bit_set - lowest_bit_set) < 12))
2290 {
2291 HOST_WIDE_INT the_const = -1;
2292 int shift = lowest_bit_set;
2293
2294 if ((highest_bit_set != 63
2295 && lowest_bit_set != 0)
2296 || all_bits_between_are_set == 0)
2297 {
2298 the_const =
2299 create_simple_focus_bits (high_bits, low_bits,
2300 lowest_bit_set, 0);
2301 }
2302 else if (lowest_bit_set == 0)
2303 shift = -(63 - highest_bit_set);
2304
2305 if (! SPARC_SIMM13_P (the_const))
2306 abort ();
2307
2308 emit_insn (gen_safe_SET64 (temp, the_const));
2309 if (shift > 0)
2310 emit_insn (gen_rtx_SET (VOIDmode,
2311 op0,
2312 gen_rtx_ASHIFT (DImode,
2313 temp,
2314 GEN_INT (shift))));
2315 else if (shift < 0)
2316 emit_insn (gen_rtx_SET (VOIDmode,
2317 op0,
2318 gen_rtx_LSHIFTRT (DImode,
2319 temp,
2320 GEN_INT (-shift))));
2321 else
2322 abort ();
2323 return;
2324 }
2325
2326 /* Now a range of 22 or fewer bits set somewhere.
2327 * 1) sethi %hi(focus_bits), %reg
2328 * sllx %reg, shift, %reg
2329 * 2) sethi %hi(focus_bits), %reg
2330 * srlx %reg, shift, %reg
2331 */
2332 if ((highest_bit_set - lowest_bit_set) < 21)
2333 {
2334 unsigned HOST_WIDE_INT focus_bits =
2335 create_simple_focus_bits (high_bits, low_bits,
2336 lowest_bit_set, 10);
2337
2338 if (! SPARC_SETHI_P (focus_bits))
2339 abort ();
2340
2341 sparc_emit_set_safe_HIGH64 (temp, focus_bits);
2342
2343 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
2344 if (lowest_bit_set < 10)
2345 emit_insn (gen_rtx_SET (VOIDmode,
2346 op0,
2347 gen_rtx_LSHIFTRT (DImode, temp,
2348 GEN_INT (10 - lowest_bit_set))));
2349 else if (lowest_bit_set > 10)
2350 emit_insn (gen_rtx_SET (VOIDmode,
2351 op0,
2352 gen_rtx_ASHIFT (DImode, temp,
2353 GEN_INT (lowest_bit_set - 10))));
2354 else
2355 abort ();
2356 return;
2357 }
2358
2359 /* 1) sethi %hi(low_bits), %reg
2360 * or %reg, %lo(low_bits), %reg
2361 * 2) sethi %hi(~low_bits), %reg
2362 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
2363 */
2364 if (high_bits == 0
2365 || high_bits == 0xffffffff)
2366 {
2367 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
2368 (high_bits == 0xffffffff));
2369 return;
2370 }
2371
2372 /* Now, try 3-insn sequences. */
2373
2374 /* 1) sethi %hi(high_bits), %reg
2375 * or %reg, %lo(high_bits), %reg
2376 * sllx %reg, 32, %reg
2377 */
2378 if (low_bits == 0)
2379 {
2380 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
2381 return;
2382 }
2383
2384 /* We may be able to do something quick
2385 when the constant is negated, so try that. */
2386 if (const64_is_2insns ((~high_bits) & 0xffffffff,
2387 (~low_bits) & 0xfffffc00))
2388 {
2389 /* NOTE: The trailing bits get XOR'd so we need the
2390 non-negated bits, not the negated ones. */
2391 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
2392
2393 if ((((~high_bits) & 0xffffffff) == 0
2394 && ((~low_bits) & 0x80000000) == 0)
2395 || (((~high_bits) & 0xffffffff) == 0xffffffff
2396 && ((~low_bits) & 0x80000000) != 0))
2397 {
2398 int fast_int = (~low_bits & 0xffffffff);
2399
2400 if ((SPARC_SETHI_P (fast_int)
2401 && (~high_bits & 0xffffffff) == 0)
2402 || SPARC_SIMM13_P (fast_int))
2403 emit_insn (gen_safe_SET64 (temp, fast_int));
2404 else
2405 sparc_emit_set_const64 (temp, GEN_INT64 (fast_int));
2406 }
2407 else
2408 {
2409 rtx negated_const;
2410 #if HOST_BITS_PER_WIDE_INT == 64
2411 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
2412 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
2413 #else
2414 negated_const = immed_double_const ((~low_bits) & 0xfffffc00,
2415 (~high_bits) & 0xffffffff,
2416 DImode);
2417 #endif
2418 sparc_emit_set_const64 (temp, negated_const);
2419 }
2420
2421 /* If we are XOR'ing with -1, then we should emit a one's complement
2422 instead. This way the combiner will notice logical operations
2423 such as ANDN later on and substitute. */
2424 if (trailing_bits == 0x3ff)
2425 {
2426 emit_insn (gen_rtx_SET (VOIDmode, op0,
2427 gen_rtx_NOT (DImode, temp)));
2428 }
2429 else
2430 {
2431 emit_insn (gen_rtx_SET (VOIDmode,
2432 op0,
2433 gen_safe_XOR64 (temp,
2434 (-0x400 | trailing_bits))));
2435 }
2436 return;
2437 }
2438
2439 /* 1) sethi %hi(xxx), %reg
2440 * or %reg, %lo(xxx), %reg
2441 * sllx %reg, yyy, %reg
2442 *
2443 * ??? This is just a generalized version of the low_bits==0
2444 * thing above, FIXME...
2445 */
2446 if ((highest_bit_set - lowest_bit_set) < 32)
2447 {
2448 unsigned HOST_WIDE_INT focus_bits =
2449 create_simple_focus_bits (high_bits, low_bits,
2450 lowest_bit_set, 0);
2451
2452 /* We can't get here in this state. */
2453 if (highest_bit_set < 32
2454 || lowest_bit_set >= 32)
2455 abort ();
2456
2457 /* So what we know is that the set bits straddle the
2458 middle of the 64-bit word. */
2459 sparc_emit_set_const64_quick2 (op0, temp,
2460 focus_bits, 0,
2461 lowest_bit_set);
2462 return;
2463 }
2464
2465 /* 1) sethi %hi(high_bits), %reg
2466 * or %reg, %lo(high_bits), %reg
2467 * sllx %reg, 32, %reg
2468 * or %reg, low_bits, %reg
2469 */
2470 if (SPARC_SIMM13_P (low_bits)
2471 && ((int) low_bits > 0))
2472 {
2473 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2474 return;
2475 }
2476
2477 /* The easiest way, when all else fails, is full decomposition. */
2478 #if 0
2479 printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
2480 high_bits, low_bits, ~high_bits, ~low_bits);
2481 #endif
2482 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2483 }
2484
2485 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2486 return the mode to be used for the comparison. For floating-point,
2487 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2488 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2489 processing is needed. */
2490
2491 enum machine_mode
2492 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2493 {
2494 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2495 {
2496 switch (op)
2497 {
2498 case EQ:
2499 case NE:
2500 case UNORDERED:
2501 case ORDERED:
2502 case UNLT:
2503 case UNLE:
2504 case UNGT:
2505 case UNGE:
2506 case UNEQ:
2507 case LTGT:
2508 return CCFPmode;
2509
2510 case LT:
2511 case LE:
2512 case GT:
2513 case GE:
2514 return CCFPEmode;
2515
2516 default:
2517 abort ();
2518 }
2519 }
2520 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2521 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2522 {
2523 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2524 return CCX_NOOVmode;
2525 else
2526 return CC_NOOVmode;
2527 }
2528 else
2529 {
2530 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2531 return CCXmode;
2532 else
2533 return CCmode;
2534 }
2535 }
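/* Editor's illustration of the selection rules above; "a" and "b"
   are hypothetical register rtxes.  */
#if 0
static enum machine_mode
example_select_cc_mode (rtx a, rtx b)
{
  /* Comparing (plus:SI a b) against zero must use the overflow-safe
     mode so that the comparison can later be folded into an addcc
     insn; comparing two plain registers would get CCmode instead.  */
  return select_cc_mode (EQ, gen_rtx_PLUS (SImode, a, b), const0_rtx);
  /* == CC_NOOVmode here (CCX_NOOVmode for DImode under -m64).  */
}
#endif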
2536
2537 /* X and Y are two things to compare using CODE. Emit the compare insn and
2538 return the rtx for the cc reg in the proper mode. */
2539
2540 rtx
2541 gen_compare_reg (enum rtx_code code, rtx x, rtx y)
2542 {
2543 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
2544 rtx cc_reg;
2545
2546 /* ??? We don't have movcc patterns, so we cannot generate pseudo regs for the
2547 fcc regs (cse can't tell they're really call-clobbered regs and will
2548 remove a duplicate comparison even if there is an intervening function
2549 call - it will then try to reload the cc reg via an int reg, which is why
2550 we need the movcc patterns). It is possible to provide the movcc
2551 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2552 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2553 to tell cse that CCFPE mode registers (even pseudos) are
2554 call-clobbered. */
2555
2556 /* ??? This is an experiment. Rather than making changes to cse which may
2557 or may not be easy/clean, we do our own cse. This is possible because
2558 we will generate hard registers. Cse knows they're call clobbered (it
2559 doesn't know the same thing about pseudos). If we guess wrong, no big
2560 deal, but if we win, great! */
2561
2562 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2563 #if 1 /* experiment */
2564 {
2565 int reg;
2566 /* We cycle through the registers to ensure they're all exercised. */
2567 static int next_fcc_reg = 0;
2568 /* Previous x,y for each fcc reg. */
2569 static rtx prev_args[4][2];
2570
2571 /* Scan prev_args for x,y. */
2572 for (reg = 0; reg < 4; reg++)
2573 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2574 break;
2575 if (reg == 4)
2576 {
2577 reg = next_fcc_reg;
2578 prev_args[reg][0] = x;
2579 prev_args[reg][1] = y;
2580 next_fcc_reg = (next_fcc_reg + 1) & 3;
2581 }
2582 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2583 }
2584 #else
2585 cc_reg = gen_reg_rtx (mode);
2586 #endif /* ! experiment */
2587 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2588 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2589 else
2590 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2591
2592 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2593 gen_rtx_COMPARE (mode, x, y)));
2594
2595 return cc_reg;
2596 }
2597
2598 /* This function is used for v9 only.
2599 CODE is the code for an Scc's comparison.
2600 OPERANDS[0] is the target of the Scc insn.
2601 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2602 been generated yet).
2603
2604 This function is needed to turn
2605
2606 (set (reg:SI 110)
2607 (gt (reg:CCX 100 %icc)
2608 (const_int 0)))
2609 into
2610 (set (reg:SI 110)
2611 (gt:DI (reg:CCX 100 %icc)
2612 (const_int 0)))
2613
2614 I.e., the instruction recognizer needs to see the mode of the comparison to
2615 find the right instruction. We could use "gt:DI" right in the
2616 define_expand, but leaving it out allows us to handle DI, SI, etc.
2617
2618 We refer to the global sparc compare operands sparc_compare_op0 and
2619 sparc_compare_op1. */
2620
2621 int
2622 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2623 {
2624 rtx temp, op0, op1;
2625
2626 if (! TARGET_ARCH64
2627 && (GET_MODE (sparc_compare_op0) == DImode
2628 || GET_MODE (operands[0]) == DImode))
2629 return 0;
2630
2631 op0 = sparc_compare_op0;
2632 op1 = sparc_compare_op1;
2633
2634 /* Try to use the movrCC insns. */
2635 if (TARGET_ARCH64
2636 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
2637 && op1 == const0_rtx
2638 && v9_regcmp_p (compare_code))
2639 {
2640 /* Special case for op0 != 0. This can be done with one instruction if
2641 operands[0] == sparc_compare_op0. */
2642
2643 if (compare_code == NE
2644 && GET_MODE (operands[0]) == DImode
2645 && rtx_equal_p (op0, operands[0]))
2646 {
2647 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2648 gen_rtx_IF_THEN_ELSE (DImode,
2649 gen_rtx_fmt_ee (compare_code, DImode,
2650 op0, const0_rtx),
2651 const1_rtx,
2652 operands[0])));
2653 return 1;
2654 }
2655
2656 if (reg_overlap_mentioned_p (operands[0], op0))
2657 {
2658 /* Handle the case where operands[0] == sparc_compare_op0.
2659 We "early clobber" the result. */
2660 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2661 emit_move_insn (op0, sparc_compare_op0);
2662 }
2663
2664 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2665 if (GET_MODE (op0) != DImode)
2666 {
2667 temp = gen_reg_rtx (DImode);
2668 convert_move (temp, op0, 0);
2669 }
2670 else
2671 temp = op0;
2672 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2673 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2674 gen_rtx_fmt_ee (compare_code, DImode,
2675 temp, const0_rtx),
2676 const1_rtx,
2677 operands[0])));
2678 return 1;
2679 }
2680 else
2681 {
2682 operands[1] = gen_compare_reg (compare_code, op0, op1);
2683
2684 switch (GET_MODE (operands[1]))
2685 {
2686 case CCmode :
2687 case CCXmode :
2688 case CCFPEmode :
2689 case CCFPmode :
2690 break;
2691 default :
2692 abort ();
2693 }
2694 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2695 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2696 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2697 gen_rtx_fmt_ee (compare_code,
2698 GET_MODE (operands[1]),
2699 operands[1], const0_rtx),
2700 const1_rtx, operands[0])));
2701 return 1;
2702 }
2703 }
2704
2705 /* Emit a conditional jump insn for the v9 architecture using comparison code
2706 CODE and jump target LABEL.
2707 This function exists to take advantage of the v9 brxx insns. */
2708
2709 void
2710 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2711 {
2712 emit_jump_insn (gen_rtx_SET (VOIDmode,
2713 pc_rtx,
2714 gen_rtx_IF_THEN_ELSE (VOIDmode,
2715 gen_rtx_fmt_ee (code, GET_MODE (op0),
2716 op0, const0_rtx),
2717 gen_rtx_LABEL_REF (VOIDmode, label),
2718 pc_rtx)));
2719 }
2720
2721 /* Generate a DFmode part of a hard TFmode register.
2722 REG is the TFmode hard register, LOW is 1 for the
2723 low 64 bits of the register and 0 otherwise. */
2725 rtx
2726 gen_df_reg (rtx reg, int low)
2727 {
2728 int regno = REGNO (reg);
2729
2730 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2731 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2732 return gen_rtx_REG (DFmode, regno);
2733 }
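/* Editor's illustration of the mapping above, using the file's usual
   SPARC register numbering (FP regs start at 32).  */
#if 0
static void
example_gen_df_reg (void)
{
  rtx tf = gen_rtx_REG (TFmode, 32);   /* %f0..%f3 */
  rtx hi = gen_df_reg (tf, 0);         /* %f0: high 64 bits */
  rtx lo = gen_df_reg (tf, 1);         /* %f2: low 64 bits, since SPARC
                                          word order is big-endian */
  if (REGNO (hi) != 32 || REGNO (lo) != 34)
    abort ();
}
#endif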
2734 \f
2735 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2736 Unlike normal calls, TFmode operands are passed by reference. It is
2737 assumed that no more than 3 operands are required. */
2738
2739 static void
2740 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2741 {
2742 rtx ret_slot = NULL, arg[3], func_sym;
2743 int i;
2744
2745 /* We only expect to be called for conversions, unary, and binary ops. */
2746 if (nargs < 2 || nargs > 3)
2747 abort ();
2748
2749 for (i = 0; i < nargs; ++i)
2750 {
2751 rtx this_arg = operands[i];
2752 rtx this_slot;
2753
2754 /* TFmode arguments and return values are passed by reference. */
2755 if (GET_MODE (this_arg) == TFmode)
2756 {
2757 int force_stack_temp;
2758
2759 force_stack_temp = 0;
2760 if (TARGET_BUGGY_QP_LIB && i == 0)
2761 force_stack_temp = 1;
2762
2763 if (GET_CODE (this_arg) == MEM
2764 && ! force_stack_temp)
2765 this_arg = XEXP (this_arg, 0);
2766 else if (CONSTANT_P (this_arg)
2767 && ! force_stack_temp)
2768 {
2769 this_slot = force_const_mem (TFmode, this_arg);
2770 this_arg = XEXP (this_slot, 0);
2771 }
2772 else
2773 {
2774 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2775
2776 /* Operand 0 is the return value. We'll copy it out later. */
2777 if (i > 0)
2778 emit_move_insn (this_slot, this_arg);
2779 else
2780 ret_slot = this_slot;
2781
2782 this_arg = XEXP (this_slot, 0);
2783 }
2784 }
2785
2786 arg[i] = this_arg;
2787 }
2788
2789 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2790
2791 if (GET_MODE (operands[0]) == TFmode)
2792 {
2793 if (nargs == 2)
2794 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2795 arg[0], GET_MODE (arg[0]),
2796 arg[1], GET_MODE (arg[1]));
2797 else
2798 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2799 arg[0], GET_MODE (arg[0]),
2800 arg[1], GET_MODE (arg[1]),
2801 arg[2], GET_MODE (arg[2]));
2802
2803 if (ret_slot)
2804 emit_move_insn (operands[0], ret_slot);
2805 }
2806 else
2807 {
2808 rtx ret;
2809
2810 if (nargs != 2)
2811 abort ();
2812
2813 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2814 GET_MODE (operands[0]), 1,
2815 arg[1], GET_MODE (arg[1]));
2816
2817 if (ret != operands[0])
2818 emit_move_insn (operands[0], ret);
2819 }
2820 }
2821
2822 /* Expand soft-float TFmode calls to SPARC ABI routines. */
2823
2824 static void
2825 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2826 {
2827 const char *func;
2828
2829 switch (code)
2830 {
2831 case PLUS:
2832 func = "_Qp_add";
2833 break;
2834 case MINUS:
2835 func = "_Qp_sub";
2836 break;
2837 case MULT:
2838 func = "_Qp_mul";
2839 break;
2840 case DIV:
2841 func = "_Qp_div";
2842 break;
2843 default:
2844 abort ();
2845 }
2846
2847 emit_soft_tfmode_libcall (func, 3, operands);
2848 }
2849
2850 static void
2851 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2852 {
2853 const char *func;
2854
2855 switch (code)
2856 {
2857 case SQRT:
2858 func = "_Qp_sqrt";
2859 break;
2860 default:
2861 abort ();
2862 }
2863
2864 emit_soft_tfmode_libcall (func, 2, operands);
2865 }
2866
2867 static void
2868 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2869 {
2870 const char *func;
2871
2872 switch (code)
2873 {
2874 case FLOAT_EXTEND:
2875 switch (GET_MODE (operands[1]))
2876 {
2877 case SFmode:
2878 func = "_Qp_stoq";
2879 break;
2880 case DFmode:
2881 func = "_Qp_dtoq";
2882 break;
2883 default:
2884 abort ();
2885 }
2886 break;
2887
2888 case FLOAT_TRUNCATE:
2889 switch (GET_MODE (operands[0]))
2890 {
2891 case SFmode:
2892 func = "_Qp_qtos";
2893 break;
2894 case DFmode:
2895 func = "_Qp_qtod";
2896 break;
2897 default:
2898 abort ();
2899 }
2900 break;
2901
2902 case FLOAT:
2903 switch (GET_MODE (operands[1]))
2904 {
2905 case SImode:
2906 func = "_Qp_itoq";
2907 break;
2908 case DImode:
2909 func = "_Qp_xtoq";
2910 break;
2911 default:
2912 abort ();
2913 }
2914 break;
2915
2916 case UNSIGNED_FLOAT:
2917 switch (GET_MODE (operands[1]))
2918 {
2919 case SImode:
2920 func = "_Qp_uitoq";
2921 break;
2922 case DImode:
2923 func = "_Qp_uxtoq";
2924 break;
2925 default:
2926 abort ();
2927 }
2928 break;
2929
2930 case FIX:
2931 switch (GET_MODE (operands[0]))
2932 {
2933 case SImode:
2934 func = "_Qp_qtoi";
2935 break;
2936 case DImode:
2937 func = "_Qp_qtox";
2938 break;
2939 default:
2940 abort ();
2941 }
2942 break;
2943
2944 case UNSIGNED_FIX:
2945 switch (GET_MODE (operands[0]))
2946 {
2947 case SImode:
2948 func = "_Qp_qtoui";
2949 break;
2950 case DImode:
2951 func = "_Qp_qtoux";
2952 break;
2953 default:
2954 abort ();
2955 }
2956 break;
2957
2958 default:
2959 abort ();
2960 }
2961
2962 emit_soft_tfmode_libcall (func, 2, operands);
2963 }
2964
2965 /* Expand a hard-float TFmode operation. All arguments must be in
2966 registers. */
2967
2968 static void
2969 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2970 {
2971 rtx op, dest;
2972
2973 if (GET_RTX_CLASS (code) == RTX_UNARY)
2974 {
2975 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2976 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2977 }
2978 else
2979 {
2980 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2981 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2982 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2983 operands[1], operands[2]);
2984 }
2985
2986 if (register_operand (operands[0], VOIDmode))
2987 dest = operands[0];
2988 else
2989 dest = gen_reg_rtx (GET_MODE (operands[0]));
2990
2991 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2992
2993 if (dest != operands[0])
2994 emit_move_insn (operands[0], dest);
2995 }
2996
2997 void
2998 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2999 {
3000 if (TARGET_HARD_QUAD)
3001 emit_hard_tfmode_operation (code, operands);
3002 else
3003 emit_soft_tfmode_binop (code, operands);
3004 }
3005
3006 void
3007 emit_tfmode_unop (enum rtx_code code, rtx *operands)
3008 {
3009 if (TARGET_HARD_QUAD)
3010 emit_hard_tfmode_operation (code, operands);
3011 else
3012 emit_soft_tfmode_unop (code, operands);
3013 }
3014
3015 void
3016 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
3017 {
3018 if (TARGET_HARD_QUAD)
3019 emit_hard_tfmode_operation (code, operands);
3020 else
3021 emit_soft_tfmode_cvt (code, operands);
3022 }
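/* Editor's note on the dispatch above: expanding, say,
   (float_extend:TF (reg:DF ...)) without TARGET_HARD_QUAD
   (-msoft-quad-float) selects "_Qp_dtoq" and goes through
   emit_soft_tfmode_libcall with the TFmode result passed by
   reference, whereas with -mhard-quad-float the expander uses a
   real fdtoq instruction instead.  */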
3023 \f
3024 /* Return nonzero if a branch/jump/call instruction will be emitting
3025 a nop into its delay slot. */
3026
3027 int
3028 empty_delay_slot (rtx insn)
3029 {
3030 rtx seq;
3031
3032 /* If there is no previous instruction (should not happen), return true. */
3033 if (PREV_INSN (insn) == NULL)
3034 return 1;
3035
3036 seq = NEXT_INSN (PREV_INSN (insn));
3037 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
3038 return 0;
3039
3040 return 1;
3041 }
3042
3043 /* Return nonzero if TRIAL can go into the call delay slot. */
3044
3045 int
3046 tls_call_delay (rtx trial)
3047 {
3048 rtx pat, unspec;
3049
3050 /* Binutils allows
3051 call __tls_get_addr, %tgd_call (foo)
3052 add %l7, %o0, %o0, %tgd_add (foo)
3053 while Sun as/ld does not. */
3054 if (TARGET_GNU_TLS || !TARGET_TLS)
3055 return 1;
3056
3057 pat = PATTERN (trial);
3058 if (GET_CODE (pat) != SET || GET_CODE (SET_DEST (pat)) != PLUS)
3059 return 1;
3060
3061 unspec = XEXP (SET_DEST (pat), 1);
3062 if (GET_CODE (unspec) != UNSPEC
3063 || (XINT (unspec, 1) != UNSPEC_TLSGD
3064 && XINT (unspec, 1) != UNSPEC_TLSLDM))
3065 return 1;
3066
3067 return 0;
3068 }
3069
3070 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
3071 instruction. RETURN_P is true if the v9 variant 'return' is to be
3072 considered in the test too.
3073
3074 TRIAL must be a SET whose destination is a REG appropriate for the
3075 'restore' instruction or, if RETURN_P is true, for the 'return'
3076 instruction. */
3077
3078 static int
3079 eligible_for_restore_insn (rtx trial, bool return_p)
3080 {
3081 rtx pat = PATTERN (trial);
3082 rtx src = SET_SRC (pat);
3083
3084 /* The 'restore src,%g0,dest' pattern for word mode and below. */
3085 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3086 && arith_operand (src, GET_MODE (src)))
3087 {
3088 if (TARGET_ARCH64)
3089 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3090 else
3091 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
3092 }
3093
3094 /* The 'restore src,%g0,dest' pattern for double-word mode. */
3095 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
3096 && arith_double_operand (src, GET_MODE (src)))
3097 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
3098
3099 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
3100 else if (! TARGET_FPU && register_operand (src, SFmode))
3101 return 1;
3102
3103 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
3104 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
3105 return 1;
3106
3107 /* If we have the 'return' instruction, anything that does not use
3108 local or output registers and can go into a delay slot wins. */
3109 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
3110 && (get_attr_in_uncond_branch_delay (trial)
3111 == IN_UNCOND_BRANCH_DELAY_TRUE))
3112 return 1;
3113
3114 /* The 'restore src1,src2,dest' pattern for SImode. */
3115 else if (GET_CODE (src) == PLUS
3116 && register_operand (XEXP (src, 0), SImode)
3117 && arith_operand (XEXP (src, 1), SImode))
3118 return 1;
3119
3120 /* The 'restore src1,src2,dest' pattern for DImode. */
3121 else if (GET_CODE (src) == PLUS
3122 && register_operand (XEXP (src, 0), DImode)
3123 && arith_double_operand (XEXP (src, 1), DImode))
3124 return 1;
3125
3126 /* The 'restore src1,%lo(src2),dest' pattern. */
3127 else if (GET_CODE (src) == LO_SUM
3128 && ! TARGET_CM_MEDMID
3129 && ((register_operand (XEXP (src, 0), SImode)
3130 && immediate_operand (XEXP (src, 1), SImode))
3131 || (TARGET_ARCH64
3132 && register_operand (XEXP (src, 0), DImode)
3133 && immediate_operand (XEXP (src, 1), DImode))))
3134 return 1;
3135
3136 /* The 'restore src,src,dest' pattern. */
3137 else if (GET_CODE (src) == ASHIFT
3138 && (register_operand (XEXP (src, 0), SImode)
3139 || register_operand (XEXP (src, 0), DImode))
3140 && XEXP (src, 1) == const1_rtx)
3141 return 1;
3142
3143 return 0;
3144 }
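/* Editor's illustration: a trial insn like

     (set (reg:SI %i0) (plus:SI (reg:SI %i1) (const_int 5)))

   matches the SImode 'restore src1,src2,dest' case above and can be
   folded into the epilogue as

     ret
     restore %i1, 5, %o0    ! after the window shift, the current
                            ! %i0 is the caller's %o0

   The callers additionally reject %l and %o destinations, which do
   not survive the restore.  */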
3145
3146 /* Return nonzero if TRIAL can go into the function return's
3147 delay slot. */
3148
3149 int
3150 eligible_for_return_delay (rtx trial)
3151 {
3152 rtx pat;
3153
3154 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3155 return 0;
3156
3157 if (get_attr_length (trial) != 1)
3158 return 0;
3159
3160 /* If there are any call-saved registers, we should scan TRIAL to
3161 verify that it does not reference them. For now just make it easy. */
3162 if (num_gfregs)
3163 return 0;
3164
3165 /* If the function uses __builtin_eh_return, the eh_return machinery
3166 occupies the delay slot. */
3167 if (current_function_calls_eh_return)
3168 return 0;
3169
3170 /* In the case of a true leaf function, anything can go into the slot. */
3171 if (sparc_leaf_function_p)
3172 return get_attr_in_uncond_branch_delay (trial)
3173 == IN_UNCOND_BRANCH_DELAY_TRUE;
3174
3175 pat = PATTERN (trial);
3176
3177 /* Otherwise, only operations which can be done in tandem with
3178 a `restore' or `return' insn can go into the delay slot. */
3179 if (GET_CODE (SET_DEST (pat)) != REG
3180 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
3181 return 0;
3182
3183 /* If this instruction sets up a floating-point register and we have a
3184 `return' instruction, it can probably go in. But `restore' will not
3185 work with FP_REGS. */
3186 if (REGNO (SET_DEST (pat)) >= 32)
3187 return (TARGET_V9
3188 && ! epilogue_renumber (&pat, 1)
3189 && (get_attr_in_uncond_branch_delay (trial)
3190 == IN_UNCOND_BRANCH_DELAY_TRUE));
3191
3192 return eligible_for_restore_insn (trial, true);
3193 }
3194
3195 /* Return nonzero if TRIAL can go into the sibling call's
3196 delay slot. */
3197
3198 int
3199 eligible_for_sibcall_delay (rtx trial)
3200 {
3201 rtx pat;
3202
3203 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
3204 return 0;
3205
3206 if (get_attr_length (trial) != 1)
3207 return 0;
3208
3209 pat = PATTERN (trial);
3210
3211 if (sparc_leaf_function_p)
3212 {
3213 /* If the tail call is done using the call instruction,
3214 we have to restore %o7 in the delay slot. */
3215 if (LEAF_SIBCALL_SLOT_RESERVED_P)
3216 return 0;
3217
3218 /* %g1 is used to build the function address. */
3219 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
3220 return 0;
3221
3222 return 1;
3223 }
3224
3225 /* Otherwise, only operations which can be done in tandem with
3226 a `restore' insn can go into the delay slot. */
3227 if (GET_CODE (SET_DEST (pat)) != REG
3228 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
3229 || REGNO (SET_DEST (pat)) >= 32)
3230 return 0;
3231
3232 /* If it mentions %o7, it can't go in, because sibcall will clobber it
3233 in most cases. */
3234 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
3235 return 0;
3236
3237 return eligible_for_restore_insn (trial, false);
3238 }
3239
3240 int
3241 short_branch (int uid1, int uid2)
3242 {
3243 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
3244
3245 /* Leave a few words of "slop". */
3246 if (delta >= -1023 && delta <= 1022)
3247 return 1;
3248
3249 return 0;
3250 }
3251
3252 /* Return nonzero if REG is not used after INSN.
3253 We assume REG is a reload reg, and therefore does
3254 not live past labels or calls or jumps. */
3255 int
3256 reg_unused_after (rtx reg, rtx insn)
3257 {
3258 enum rtx_code code, prev_code = UNKNOWN;
3259
3260 while ((insn = NEXT_INSN (insn)))
3261 {
3262 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
3263 return 1;
3264
3265 code = GET_CODE (insn);
3266 if (code == CODE_LABEL)
3267 return 1;
3268
3269 if (INSN_P (insn))
3270 {
3271 rtx set = single_set (insn);
3272 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
3273 if (in_src)
3274 return 0;
3275 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
3276 return 1;
3277 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
3278 return 0;
3279 }
3280 prev_code = code;
3281 }
3282 return 1;
3283 }
3284 \f
3285 /* Determine if it's legal to put X into the constant pool. This
3286 is not possible if X contains the address of a symbol that is
3287 not constant (TLS) or not known at final link time (PIC). */
3288
3289 static bool
3290 sparc_cannot_force_const_mem (rtx x)
3291 {
3292 switch (GET_CODE (x))
3293 {
3294 case CONST_INT:
3295 case CONST_DOUBLE:
3296 /* Accept all non-symbolic constants. */
3297 return false;
3298
3299 case LABEL_REF:
3300 /* Labels are OK iff we are non-PIC. */
3301 return flag_pic != 0;
3302
3303 case SYMBOL_REF:
3304 /* 'Naked' TLS symbol references are never OK,
3305 non-TLS symbols are OK iff we are non-PIC. */
3306 if (SYMBOL_REF_TLS_MODEL (x))
3307 return true;
3308 else
3309 return flag_pic != 0;
3310
3311 case CONST:
3312 return sparc_cannot_force_const_mem (XEXP (x, 0));
3313 case PLUS:
3314 case MINUS:
3315 return sparc_cannot_force_const_mem (XEXP (x, 0))
3316 || sparc_cannot_force_const_mem (XEXP (x, 1));
3317 case UNSPEC:
3318 return true;
3319 default:
3320 abort ();
3321 }
3322 }
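/* Editor's examples for the cases above: a plain (const_int 42) or a
   CONST_DOUBLE may always be forced into the constant pool;
   (symbol_ref "foo") may not when flag_pic is set, since its final
   address is unknown at link time; and a TLS symbol never may,
   whatever the PIC setting.  */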
3323 \f
3324 /* The table we use to reference PIC data. */
3325 static GTY(()) rtx global_offset_table;
3326
3327 /* The function we use to get at it. */
3328 static GTY(()) rtx add_pc_to_pic_symbol;
3329 static GTY(()) char add_pc_to_pic_symbol_name[256];
3330
3331 /* Ensure that we are not using patterns that are not OK with PIC. */
3332
3333 int
3334 check_pic (int i)
3335 {
3336 switch (flag_pic)
3337 {
3338 case 1:
3339 if (GET_CODE (recog_data.operand[i]) == SYMBOL_REF
3340 || (GET_CODE (recog_data.operand[i]) == CONST
3341 && ! (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
3342 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
3343 == global_offset_table)
3344 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
3345 == CONST))))
3346 abort ();
3347 case 2:
3348 default:
3349 return 1;
3350 }
3351 }
3352
3353 /* Return true if X is an address which needs a temporary register when
3354 reloaded while generating PIC code. */
3355
3356 int
3357 pic_address_needs_scratch (rtx x)
3358 {
3359 /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg. */
3360 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3361 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3362 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3363 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3364 return 1;
3365
3366 return 0;
3367 }
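/* Editor's illustration; the symbol name and offset are hypothetical.  */
#if 0
static void
example_pic_scratch (void)
{
  /* symbol + 8192 exceeds the 13-bit signed immediate range, so a
     scratch register is required to build the offset.  */
  rtx x = gen_rtx_CONST (Pmode,
                         gen_rtx_PLUS (Pmode,
                                       gen_rtx_SYMBOL_REF (Pmode, "foo"),
                                       GEN_INT (8192)));
  if (! pic_address_needs_scratch (x))
    abort ();
  /* With an offset of, say, 123 instead, no scratch would be needed.  */
}
#endif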
3368
3369 /* Determine if a given RTX is a valid constant. We already know this
3370 satisfies CONSTANT_P. */
3371
3372 bool
3373 legitimate_constant_p (rtx x)
3374 {
3375 rtx inner;
3376
3377 switch (GET_CODE (x))
3378 {
3379 case SYMBOL_REF:
3380 /* TLS symbols are not constant. */
3381 if (SYMBOL_REF_TLS_MODEL (x))
3382 return false;
3383 break;
3384
3385 case CONST:
3386 inner = XEXP (x, 0);
3387
3388 /* Offsets of TLS symbols are never valid.
3389 Discourage CSE from creating them. */
3390 if (GET_CODE (inner) == PLUS
3391 && tls_symbolic_operand (XEXP (inner, 0)))
3392 return false;
3393 break;
3394
3395 case CONST_DOUBLE:
3396 if (GET_MODE (x) == VOIDmode)
3397 return true;
3398
3399 /* Floating point constants are generally not ok.
3400 The only exception is 0.0 in VIS. */
3401 if (TARGET_VIS
3402 && (GET_MODE (x) == SFmode
3403 || GET_MODE (x) == DFmode
3404 || GET_MODE (x) == TFmode)
3405 && fp_zero_operand (x, GET_MODE (x)))
3406 return true;
3407
3408 return false;
3409
3410 default:
3411 break;
3412 }
3413
3414 return true;
3415 }
3416
3417 /* Determine if a given RTX is a valid constant address. */
3418
3419 bool
3420 constant_address_p (rtx x)
3421 {
3422 switch (GET_CODE (x))
3423 {
3424 case LABEL_REF:
3425 case CONST_INT:
3426 case HIGH:
3427 return true;
3428
3429 case CONST:
3430 if (flag_pic && pic_address_needs_scratch (x))
3431 return false;
3432 return legitimate_constant_p (x);
3433
3434 case SYMBOL_REF:
3435 return !flag_pic && legitimate_constant_p (x);
3436
3437 default:
3438 return false;
3439 }
3440 }
3441
3442 /* Nonzero if the constant value X is a legitimate general operand
3443 when generating PIC code. It is given that flag_pic is on and
3444 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3445
3446 bool
3447 legitimate_pic_operand_p (rtx x)
3448 {
3449 if (pic_address_needs_scratch (x))
3450 return false;
3451 if (tls_symbolic_operand (x)
3452 || (GET_CODE (x) == CONST
3453 && GET_CODE (XEXP (x, 0)) == PLUS
3454 && tls_symbolic_operand (XEXP (XEXP (x, 0), 0))))
3455 return false;
3456 return true;
3457 }
3458
3459 /* Return nonzero if ADDR is a valid memory address.
3460 STRICT specifies whether strict register checking applies. */
3461
3462 int
3463 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
3464 {
3465 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL, imm2;
3466
3467 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3468 rs1 = addr;
3469 else if (GET_CODE (addr) == PLUS)
3470 {
3471 rs1 = XEXP (addr, 0);
3472 rs2 = XEXP (addr, 1);
3473
3474 /* Canonicalize. REG comes first; if there are no regs,
3475 LO_SUM comes first. */
3476 if (!REG_P (rs1)
3477 && GET_CODE (rs1) != SUBREG
3478 && (REG_P (rs2)
3479 || GET_CODE (rs2) == SUBREG
3480 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3481 {
3482 rs1 = XEXP (addr, 1);
3483 rs2 = XEXP (addr, 0);
3484 }
3485
3486 if ((flag_pic == 1
3487 && rs1 == pic_offset_table_rtx
3488 && !REG_P (rs2)
3489 && GET_CODE (rs2) != SUBREG
3490 && GET_CODE (rs2) != LO_SUM
3491 && GET_CODE (rs2) != MEM
3492 && !tls_symbolic_operand (rs2)
3493 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3494 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3495 || ((REG_P (rs1)
3496 || GET_CODE (rs1) == SUBREG)
3497 && RTX_OK_FOR_OFFSET_P (rs2)))
3498 {
3499 imm1 = rs2;
3500 rs2 = NULL;
3501 }
3502 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3503 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3504 {
3505 /* We prohibit REG + REG for TFmode when there are no quad move insns
3506 and we consequently need to split. We do this because REG+REG
3507 is not an offsettable address. If we get the situation in reload
3508 where source and destination of a movtf pattern are both MEMs with
3509 REG+REG addresses, then only one of them gets converted to an
3510 offsettable address. */
3511 if (mode == TFmode
3512 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3513 return 0;
3514
3515 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
3516 optimizing, because then mem_min_alignment is likely to be zero
3517 after reload and the forced split would lack a matching splitter
3518 pattern. */
3519 if (TARGET_ARCH32 && !optimize
3520 && (mode == DFmode || mode == DImode))
3521 return 0;
3522 }
3523 else if (USE_AS_OFFSETABLE_LO10
3524 && GET_CODE (rs1) == LO_SUM
3525 && TARGET_ARCH64
3526 && ! TARGET_CM_MEDMID
3527 && RTX_OK_FOR_OLO10_P (rs2))
3528 {
3529 imm2 = rs2;
3530 rs2 = NULL;
3531 imm1 = XEXP (rs1, 1);
3532 rs1 = XEXP (rs1, 0);
3533 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3534 return 0;
3535 }
3536 }
3537 else if (GET_CODE (addr) == LO_SUM)
3538 {
3539 rs1 = XEXP (addr, 0);
3540 imm1 = XEXP (addr, 1);
3541
3542 if (! CONSTANT_P (imm1) || tls_symbolic_operand (rs1))
3543 return 0;
3544
3545 if (USE_AS_OFFSETABLE_LO10)
3546 {
3547 /* We can't allow TFmode, because an offset greater than or equal to
3548 the alignment (8) may cause the LO_SUM to overflow if !v9. */
3549 if (mode == TFmode && ! TARGET_V9)
3550 return 0;
3551 }
3552 else
3553 {
3554 /* We prohibit LO_SUM for TFmode when there are no quad move insns
3555 and we consequently need to split. We do this because LO_SUM
3556 is not an offsettable address. If we get the situation in reload
3557 where source and destination of a movtf pattern are both MEMs with
3558 LO_SUM addresses, then only one of them gets converted to an
3559 offsettable address. */
3560 if (mode == TFmode
3561 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3562 return 0;
3563 }
3564 }
3565 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3566 return 1;
3567 else
3568 return 0;
3569
3570 if (GET_CODE (rs1) == SUBREG)
3571 rs1 = SUBREG_REG (rs1);
3572 if (!REG_P (rs1))
3573 return 0;
3574
3575 if (rs2)
3576 {
3577 if (GET_CODE (rs2) == SUBREG)
3578 rs2 = SUBREG_REG (rs2);
3579 if (!REG_P (rs2))
3580 return 0;
3581 }
3582
3583 if (strict)
3584 {
3585 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3586 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3587 return 0;
3588 }
3589 else
3590 {
3591 if ((REGNO (rs1) >= 32
3592 && REGNO (rs1) != FRAME_POINTER_REGNUM
3593 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3594 || (rs2
3595 && (REGNO (rs2) >= 32
3596 && REGNO (rs2) != FRAME_POINTER_REGNUM
3597 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3598 return 0;
3599 }
3600 return 1;
3601 }
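/* Editor's summary of the address shapes the routine accepts, for
   quick reference:

     (reg)                             register indirect
     (plus (reg) (const_int simm13))   base + 13-bit signed offset
     (plus (reg) (reg))                base + index (with the TFmode
                                       and ARCH32 exceptions above)
     (lo_sum (reg) (imm))              sethi/%lo pair
     (const_int simm13)                small absolute address

   For example, (plus (reg) (const_int 8192)) is rejected because
   8192 does not fit in the simm13 field.  */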
3602
3603 /* Construct the SYMBOL_REF for the tls_get_offset function. */
3604
3605 static GTY(()) rtx sparc_tls_symbol;
3606 static rtx
3607 sparc_tls_get_addr (void)
3608 {
3609 if (!sparc_tls_symbol)
3610 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3611
3612 return sparc_tls_symbol;
3613 }
3614
3615 static rtx
3616 sparc_tls_got (void)
3617 {
3618 rtx temp;
3619 if (flag_pic)
3620 {
3621 current_function_uses_pic_offset_table = 1;
3622 return pic_offset_table_rtx;
3623 }
3624
3625 if (!global_offset_table)
3626 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3627 temp = gen_reg_rtx (Pmode);
3628 emit_move_insn (temp, global_offset_table);
3629 return temp;
3630 }
3631
3632
3633 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3634 this (thread-local) address. */
3635
3636 rtx
3637 legitimize_tls_address (rtx addr)
3638 {
3639 rtx temp1, temp2, temp3, ret, o0, got, insn;
3640
3641 if (no_new_pseudos)
3642 abort ();
3643
3644 if (GET_CODE (addr) == SYMBOL_REF)
3645 switch (SYMBOL_REF_TLS_MODEL (addr))
3646 {
3647 case TLS_MODEL_GLOBAL_DYNAMIC:
3648 start_sequence ();
3649 temp1 = gen_reg_rtx (SImode);
3650 temp2 = gen_reg_rtx (SImode);
3651 ret = gen_reg_rtx (Pmode);
3652 o0 = gen_rtx_REG (Pmode, 8);
3653 got = sparc_tls_got ();
3654 emit_insn (gen_tgd_hi22 (temp1, addr));
3655 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3656 if (TARGET_ARCH32)
3657 {
3658 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3659 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3660 addr, const1_rtx));
3661 }
3662 else
3663 {
3664 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3665 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3666 addr, const1_rtx));
3667 }
3668 CALL_INSN_FUNCTION_USAGE (insn)
3669 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3670 CALL_INSN_FUNCTION_USAGE (insn));
3671 insn = get_insns ();
3672 end_sequence ();
3673 emit_libcall_block (insn, ret, o0, addr);
3674 break;
3675
3676 case TLS_MODEL_LOCAL_DYNAMIC:
3677 start_sequence ();
3678 temp1 = gen_reg_rtx (SImode);
3679 temp2 = gen_reg_rtx (SImode);
3680 temp3 = gen_reg_rtx (Pmode);
3681 ret = gen_reg_rtx (Pmode);
3682 o0 = gen_rtx_REG (Pmode, 8);
3683 got = sparc_tls_got ();
3684 emit_insn (gen_tldm_hi22 (temp1));
3685 emit_insn (gen_tldm_lo10 (temp2, temp1));
3686 if (TARGET_ARCH32)
3687 {
3688 emit_insn (gen_tldm_add32 (o0, got, temp2));
3689 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3690 const1_rtx));
3691 }
3692 else
3693 {
3694 emit_insn (gen_tldm_add64 (o0, got, temp2));
3695 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3696 const1_rtx));
3697 }
3698 CALL_INSN_FUNCTION_USAGE (insn)
3699 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3700 CALL_INSN_FUNCTION_USAGE (insn));
3701 insn = get_insns ();
3702 end_sequence ();
3703 emit_libcall_block (insn, temp3, o0,
3704 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3705 UNSPEC_TLSLD_BASE));
3706 temp1 = gen_reg_rtx (SImode);
3707 temp2 = gen_reg_rtx (SImode);
3708 emit_insn (gen_tldo_hix22 (temp1, addr));
3709 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3710 if (TARGET_ARCH32)
3711 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3712 else
3713 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3714 break;
3715
3716 case TLS_MODEL_INITIAL_EXEC:
3717 temp1 = gen_reg_rtx (SImode);
3718 temp2 = gen_reg_rtx (SImode);
3719 temp3 = gen_reg_rtx (Pmode);
3720 got = sparc_tls_got ();
3721 emit_insn (gen_tie_hi22 (temp1, addr));
3722 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3723 if (TARGET_ARCH32)
3724 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3725 else
3726 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3727 if (TARGET_SUN_TLS)
3728 {
3729 ret = gen_reg_rtx (Pmode);
3730 if (TARGET_ARCH32)
3731 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3732 temp3, addr));
3733 else
3734 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3735 temp3, addr));
3736 }
3737 else
3738 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3739 break;
3740
3741 case TLS_MODEL_LOCAL_EXEC:
3742 temp1 = gen_reg_rtx (Pmode);
3743 temp2 = gen_reg_rtx (Pmode);
3744 if (TARGET_ARCH32)
3745 {
3746 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3747 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3748 }
3749 else
3750 {
3751 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3752 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3753 }
3754 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3755 break;
3756
3757 default:
3758 abort ();
3759 }
3760
3761 else
3762 abort (); /* for now ... */
3763
3764 return ret;
3765 }
3766
3767
3768 /* Legitimize PIC addresses. If the address is already position-independent,
3769 we return ORIG. Newly generated position-independent addresses go into a
3770 reg. This is REG if nonzero, otherwise we allocate register(s) as
3771 necessary. */
3772
3773 rtx
3774 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3775 rtx reg)
3776 {
3777 if (GET_CODE (orig) == SYMBOL_REF)
3778 {
3779 rtx pic_ref, address;
3780 rtx insn;
3781
3782 if (reg == 0)
3783 {
3784 if (reload_in_progress || reload_completed)
3785 abort ();
3786 else
3787 reg = gen_reg_rtx (Pmode);
3788 }
3789
3790 if (flag_pic == 2)
3791 {
3792 /* If not during reload, allocate another temp reg here for loading
3793 in the address, so that these instructions can be optimized
3794 properly. */
3795 rtx temp_reg = ((reload_in_progress || reload_completed)
3796 ? reg : gen_reg_rtx (Pmode));
3797
3798 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3799 won't get confused into thinking that these two instructions
3800 are loading in the true address of the symbol. If in the
3801 future a PIC rtx exists, that should be used instead. */
3802 if (Pmode == SImode)
3803 {
3804 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3805 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3806 }
3807 else
3808 {
3809 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3810 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3811 }
3812 address = temp_reg;
3813 }
3814 else
3815 address = orig;
3816
3817 pic_ref = gen_const_mem (Pmode,
3818 gen_rtx_PLUS (Pmode,
3819 pic_offset_table_rtx, address));
3820 current_function_uses_pic_offset_table = 1;
3821 insn = emit_move_insn (reg, pic_ref);
3822 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3823 by loop. */
3824 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3825 REG_NOTES (insn));
3826 return reg;
3827 }
3828 else if (GET_CODE (orig) == CONST)
3829 {
3830 rtx base, offset;
3831
3832 if (GET_CODE (XEXP (orig, 0)) == PLUS
3833 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3834 return orig;
3835
3836 if (reg == 0)
3837 {
3838 if (reload_in_progress || reload_completed)
3839 abort ();
3840 else
3841 reg = gen_reg_rtx (Pmode);
3842 }
3843
3844 if (GET_CODE (XEXP (orig, 0)) == PLUS)
3845 {
3846 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3847 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3848 base == reg ? 0 : reg);
3849 }
3850 else
3851 abort ();
3852
3853 if (GET_CODE (offset) == CONST_INT)
3854 {
3855 if (SMALL_INT (offset))
3856 return plus_constant (base, INTVAL (offset));
3857 else if (! reload_in_progress && ! reload_completed)
3858 offset = force_reg (Pmode, offset);
3859 else
3860 /* If we reach here, then something is seriously wrong. */
3861 abort ();
3862 }
3863 return gen_rtx_PLUS (Pmode, base, offset);
3864 }
3865 else if (GET_CODE (orig) == LABEL_REF)
3866 /* ??? Why do we do this? */
3867 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3868 the register is live instead, in case it is eliminated. */
3869 current_function_uses_pic_offset_table = 1;
3870
3871 return orig;
3872 }
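
/* A sketch of the code emitted above, for illustration only (register
   choices here are assumptions; %l7 is the PIC register on SPARC).
   With -fPIC (flag_pic == 2) a SImode load of SYM becomes roughly:

	sethi	%hi(SYM), %tmp		! movsi_high_pic
	or	%tmp, %lo(SYM), %tmp	! movsi_lo_sum_pic
	ld	[%l7 + %tmp], reg	! GOT entry load

   while with -fpic (flag_pic == 1) the GOT offset fits in 13 bits and
   a single `ld [%l7 + SYM], reg' suffices.  */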
3873
3874 /* Try machine-dependent ways of modifying an illegitimate address X
3875 to be legitimate. If we find one, return the new, valid address.
3876
3877 OLDX is the address as it was before break_out_memory_refs was called.
3878 In some cases it is useful to look at this to decide what needs to be done.
3879
3880 MODE is the mode of the operand pointed to by X. */
3881
3882 rtx
3883 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3884 {
3885 rtx orig_x = x;
3886
3887 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3888 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3889 force_operand (XEXP (x, 0), NULL_RTX));
3890 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3891 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3892 force_operand (XEXP (x, 1), NULL_RTX));
3893 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3894 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3895 XEXP (x, 1));
3896 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3897 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3898 force_operand (XEXP (x, 1), NULL_RTX));
3899
3900 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3901 return x;
3902
3903 if (tls_symbolic_operand (x))
3904 x = legitimize_tls_address (x);
3905 else if (flag_pic)
3906 x = legitimize_pic_address (x, mode, 0);
3907 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3908 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3909 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3910 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3911 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3912 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3913 else if (GET_CODE (x) == SYMBOL_REF
3914 || GET_CODE (x) == CONST
3915 || GET_CODE (x) == LABEL_REF)
3916 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3917 return x;
3918 }
3919
3920 /* Emit the special PIC prologue. */
3921
3922 static void
3923 load_pic_register (void)
3924 {
3925 int orig_flag_pic = flag_pic;
3926
3927 /* If we haven't emitted the special helper function, do so now. */
3928 if (add_pc_to_pic_symbol_name[0] == 0)
3929 {
3930 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3931 int align;
3932
3933 ASM_GENERATE_INTERNAL_LABEL (add_pc_to_pic_symbol_name, "LADDPC", 0);
3934 text_section ();
3935
3936 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3937 if (align > 0)
3938 ASM_OUTPUT_ALIGN (asm_out_file, align);
3939 ASM_OUTPUT_LABEL (asm_out_file, add_pc_to_pic_symbol_name);
3940 if (flag_delayed_branch)
3941 fprintf (asm_out_file, "\tjmp %%o7+8\n\t add\t%%o7, %s, %s\n",
3942 pic_name, pic_name);
3943 else
3944 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp %%o7+8\n\t nop\n",
3945 pic_name, pic_name);
3946 }
3947
3948 /* Initialize every time through, since we can't easily
3949 know this to be permanent. */
3950 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3951 add_pc_to_pic_symbol = gen_rtx_SYMBOL_REF (Pmode, add_pc_to_pic_symbol_name);
3952
3953 flag_pic = 0;
3954 emit_insn (gen_load_pcrel_sym (pic_offset_table_rtx, global_offset_table,
3955 add_pc_to_pic_symbol));
3956 flag_pic = orig_flag_pic;
3957
3958 /* Need to emit this whether or not we obey regdecls,
3959 since setjmp/longjmp can cause life info to screw up.
3960 ??? In the case where we don't obey regdecls, this is not sufficient
3961 since we may not fall out the bottom. */
3962 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3963 }
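
/* For illustration, the load_pcrel_sym pattern set up above expands to
   something like (a sketch; the exact text comes from sparc.md):

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	.LADDPC0
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where .LADDPC0 is the helper emitted once above:

   .LADDPC0:
	jmp	%o7+8
	 add	%o7, %l7, %l7		! %l7 now points to the GOT  */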
3964 \f
3965 /* Return 1 if RTX is a MEM which is known to be aligned to at
3966 least a DESIRED byte boundary. */
3967
3968 int
3969 mem_min_alignment (rtx mem, int desired)
3970 {
3971 rtx addr, base, offset;
3972
3973 /* If it's not a MEM we can't accept it. */
3974 if (GET_CODE (mem) != MEM)
3975 return 0;
3976
3977 addr = XEXP (mem, 0);
3978 base = offset = NULL_RTX;
3979 if (GET_CODE (addr) == PLUS)
3980 {
3981 if (GET_CODE (XEXP (addr, 0)) == REG)
3982 {
3983 base = XEXP (addr, 0);
3984
3985 /* What we are saying here is that if the base
3986 REG is aligned properly, the compiler will make
3987 sure any REG based index upon it will be so
3988 as well. */
3989 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3990 offset = XEXP (addr, 1);
3991 else
3992 offset = const0_rtx;
3993 }
3994 }
3995 else if (GET_CODE (addr) == REG)
3996 {
3997 base = addr;
3998 offset = const0_rtx;
3999 }
4000
4001 if (base != NULL_RTX)
4002 {
4003 int regno = REGNO (base);
4004
4005 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
4006 {
4007 /* Check if the compiler has recorded some information
4008 about the alignment of the base REG. If reload has
4009 completed, we already matched with proper alignments.
4010 If not running global_alloc, reload might give us
4011 	     an unaligned pointer to the local stack, though.  */
4012 if (((cfun != 0
4013 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
4014 || (optimize && reload_completed))
4015 && (INTVAL (offset) & (desired - 1)) == 0)
4016 return 1;
4017 }
4018 else
4019 {
4020 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
4021 return 1;
4022 }
4023 }
4024 else if (! TARGET_UNALIGNED_DOUBLES
4025 || CONSTANT_P (addr)
4026 || GET_CODE (addr) == LO_SUM)
4027 {
4028 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
4029 is true, in which case we can only assume that an access is aligned if
4030 it is to a constant address, or the address involves a LO_SUM. */
4031 return 1;
4032 }
4033
4034 /* An obviously unaligned address. */
4035 return 0;
4036 }
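
/* Usage sketch: mem_min_alignment (m, 8) returns 1 for a MEM whose
   address is %sp or %fp plus a constant, provided (offset
   - SPARC_STACK_BIAS) is a multiple of 8; for any other base register
   it relies on REGNO_POINTER_ALIGN having recorded at least 64-bit
   alignment, or on reload having completed with matched alignments.  */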
4037
4038 \f
4039 /* Vectors to keep interesting information about registers where it can easily
4040    be obtained.  We used to use the actual mode value as the bit number, but there
4041 are more than 32 modes now. Instead we use two tables: one indexed by
4042 hard register number, and one indexed by mode. */
4043
4044 /* The purpose of sparc_mode_class is to shrink the range of modes so that
4045 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
4046 mapped into one sparc_mode_class mode. */
4047
4048 enum sparc_mode_class {
4049 S_MODE, D_MODE, T_MODE, O_MODE,
4050 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
4051 CC_MODE, CCFP_MODE
4052 };
4053
4054 /* Modes for single-word and smaller quantities. */
4055 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
4056
4057 /* Modes for double-word and smaller quantities. */
4058 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
4059
4060 /* Modes for quad-word and smaller quantities. */
4061 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
4062
4063 /* Modes for 8-word and smaller quantities. */
4064 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
4065
4066 /* Modes for single-float quantities. We must allow any single word or
4067 smaller quantity. This is because the fix/float conversion instructions
4068 take integer inputs/outputs from the float registers. */
4069 #define SF_MODES (S_MODES)
4070
4071 /* Modes for double-float and smaller quantities. */
4072 #define DF_MODES (S_MODES | D_MODES)
4073
4074 /* Modes for double-float only quantities. */
4075 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
4076
4077 /* Modes for quad-float only quantities. */
4078 #define TF_ONLY_MODES (1 << (int) TF_MODE)
4079
4080 /* Modes for quad-float and smaller quantities. */
4081 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
4082
4083 /* Modes for quad-float and double-float quantities. */
4084 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
4085
4086 /* Modes for quad-float pair only quantities. */
4087 #define OF_ONLY_MODES (1 << (int) OF_MODE)
4088
4089 /* Modes for quad-float pairs and smaller quantities. */
4090 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
4091
4092 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
4093
4094 /* Modes for condition codes. */
4095 #define CC_MODES (1 << (int) CC_MODE)
4096 #define CCFP_MODES (1 << (int) CCFP_MODE)
4097
4098 /* Value is 1 if register/mode pair is acceptable on sparc.
4099 The funny mixture of D and T modes is because integer operations
4100 do not specially operate on tetra quantities, so non-quad-aligned
4101 registers can hold quadword quantities (except %o4 and %i4 because
4102 they cross fixed registers). */
4103
4104 /* This points to either the 32 bit or the 64 bit version. */
4105 const int *hard_regno_mode_classes;
4106
4107 static const int hard_32bit_mode_classes[] = {
4108 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4109 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4110 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
4111 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
4112
4113 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4114 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4115 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4116 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4117
4118 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4119 and none can hold SFmode/SImode values. */
4120 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4121 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4122 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4123 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4124
4125 /* %fcc[0123] */
4126 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4127
4128 /* %icc */
4129 CC_MODES
4130 };
4131
4132 static const int hard_64bit_mode_classes[] = {
4133 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4134 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4135 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4136 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4137
4138 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4139 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4140 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4141 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4142
4143 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4144 and none can hold SFmode/SImode values. */
4145 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4146 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4147 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4148 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4149
4150 /* %fcc[0123] */
4151 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4152
4153 /* %icc */
4154 CC_MODES
4155 };
4156
4157 int sparc_mode_class [NUM_MACHINE_MODES];
4158
4159 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
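
/* Taken together, a (regno, mode) pair is valid iff the register's
   class mask has the bit for the mode's class set.  An illustrative
   sketch, not compiled (this is in essence what HARD_REGNO_MODE_OK in
   sparc.h reduces to):  */
#if 0
static int
hard_regno_mode_ok_sketch (int regno, enum machine_mode mode)
{
  return (hard_regno_mode_classes[regno] & sparc_mode_class[(int) mode]) != 0;
}
#endif
/* Reading the tables: in hard_32bit_mode_classes, entry 8 (%o0) is
   T_MODES, so a quad-aligned integer register may start a TImode
   value, while entry 9 (%o1) is only S_MODES; entry 32 (%f0) is
   OF_MODES but entry 33 (%f1) is SF_MODES, since multi-register FP
   values must start on even-numbered registers.  */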
4160
4161 static void
4162 sparc_init_modes (void)
4163 {
4164 int i;
4165
4166 for (i = 0; i < NUM_MACHINE_MODES; i++)
4167 {
4168 switch (GET_MODE_CLASS (i))
4169 {
4170 case MODE_INT:
4171 case MODE_PARTIAL_INT:
4172 case MODE_COMPLEX_INT:
4173 if (GET_MODE_SIZE (i) <= 4)
4174 sparc_mode_class[i] = 1 << (int) S_MODE;
4175 else if (GET_MODE_SIZE (i) == 8)
4176 sparc_mode_class[i] = 1 << (int) D_MODE;
4177 else if (GET_MODE_SIZE (i) == 16)
4178 sparc_mode_class[i] = 1 << (int) T_MODE;
4179 else if (GET_MODE_SIZE (i) == 32)
4180 sparc_mode_class[i] = 1 << (int) O_MODE;
4181 else
4182 sparc_mode_class[i] = 0;
4183 break;
4184 case MODE_FLOAT:
4185 case MODE_COMPLEX_FLOAT:
4186 if (GET_MODE_SIZE (i) <= 4)
4187 sparc_mode_class[i] = 1 << (int) SF_MODE;
4188 else if (GET_MODE_SIZE (i) == 8)
4189 sparc_mode_class[i] = 1 << (int) DF_MODE;
4190 else if (GET_MODE_SIZE (i) == 16)
4191 sparc_mode_class[i] = 1 << (int) TF_MODE;
4192 else if (GET_MODE_SIZE (i) == 32)
4193 sparc_mode_class[i] = 1 << (int) OF_MODE;
4194 else
4195 sparc_mode_class[i] = 0;
4196 break;
4197 case MODE_CC:
4198 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4199 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4200 else
4201 sparc_mode_class[i] = 1 << (int) CC_MODE;
4202 break;
4203 default:
4204 sparc_mode_class[i] = 0;
4205 break;
4206 }
4207 }
4208
4209 if (TARGET_ARCH64)
4210 hard_regno_mode_classes = hard_64bit_mode_classes;
4211 else
4212 hard_regno_mode_classes = hard_32bit_mode_classes;
4213
4214 /* Initialize the array used by REGNO_REG_CLASS. */
4215 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4216 {
4217 if (i < 16 && TARGET_V8PLUS)
4218 sparc_regno_reg_class[i] = I64_REGS;
4219 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4220 sparc_regno_reg_class[i] = GENERAL_REGS;
4221 else if (i < 64)
4222 sparc_regno_reg_class[i] = FP_REGS;
4223 else if (i < 96)
4224 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4225 else if (i < 100)
4226 sparc_regno_reg_class[i] = FPCC_REGS;
4227 else
4228 sparc_regno_reg_class[i] = NO_REGS;
4229 }
4230 }
4231 \f
4232 /* Compute the frame size required by the function. This function is called
4233 during the reload pass and also by sparc_expand_prologue. */
4234
4235 HOST_WIDE_INT
4236 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4237 {
4238 int outgoing_args_size = (current_function_outgoing_args_size
4239 + REG_PARM_STACK_SPACE (current_function_decl));
4240 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4241 int i;
4242
4243 if (TARGET_ARCH64)
4244 {
4245 for (i = 0; i < 8; i++)
4246 if (regs_ever_live[i] && ! call_used_regs[i])
4247 n_regs += 2;
4248 }
4249 else
4250 {
4251 for (i = 0; i < 8; i += 2)
4252 if ((regs_ever_live[i] && ! call_used_regs[i])
4253 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4254 n_regs += 2;
4255 }
4256
4257 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4258 if ((regs_ever_live[i] && ! call_used_regs[i])
4259 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
4260 n_regs += 2;
4261
4262 /* Set up values for use in prologue and epilogue. */
4263 num_gfregs = n_regs;
4264
4265 if (leaf_function_p
4266 && n_regs == 0
4267 && size == 0
4268 && current_function_outgoing_args_size == 0)
4269 actual_fsize = apparent_fsize = 0;
4270 else
4271 {
4272 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4273 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4274 apparent_fsize += n_regs * 4;
4275 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4276 }
4277
4278 /* Make sure nothing can clobber our register windows.
4279 If a SAVE must be done, or there is a stack-local variable,
4280 the register window area must be allocated.
4281 ??? For v8 we apparently need an additional 8 bytes of reserved space. */
4282 if (! leaf_function_p || size > 0)
4283 actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
4284
4285 return SPARC_STACK_ALIGN (actual_fsize);
4286 }
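
/* Worked example (a sketch; assumes TARGET_ARCH32, a STARTING_FRAME_OFFSET
   of 0 and no outgoing arguments): a non-leaf function with 40 bytes of
   locals and no call-saved registers gets apparent_fsize = 40, then
   actual_fsize = 40 + 16 * 4 + 8 = 112 for the register window area plus
   the v8 reserved space, which SPARC_STACK_ALIGN leaves at 112.  */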
4287
4288 /* Output any necessary .register pseudo-ops. */
4289
4290 void
4291 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4292 {
4293 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4294 int i;
4295
4296 if (TARGET_ARCH32)
4297 return;
4298
4299 /* Check if %g[2367] were used without
4300 .register being printed for them already. */
4301 for (i = 2; i < 8; i++)
4302 {
4303 if (regs_ever_live [i]
4304 && ! sparc_hard_reg_printed [i])
4305 {
4306 sparc_hard_reg_printed [i] = 1;
4307 fprintf (file, "\t.register\t%%g%d, #scratch\n", i);
4308 }
4309 if (i == 3) i = 5;
4310 }
4311 #endif
4312 }
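
/* For example, a 64-bit compilation whose code touches %g2 and %g3
   gets the following directives, emitted once per register:

	.register	%g2, #scratch
	.register	%g3, #scratch  */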
4313
4314 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4315 as needed. LOW should be double-word aligned for 32-bit registers.
4316 Return the new OFFSET. */
4317
4318 #define SORR_SAVE 0
4319 #define SORR_RESTORE 1
4320
4321 static int
4322 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4323 {
4324 rtx mem, insn;
4325 int i;
4326
4327 if (TARGET_ARCH64 && high <= 32)
4328 {
4329 for (i = low; i < high; i++)
4330 {
4331 if (regs_ever_live[i] && ! call_used_regs[i])
4332 {
4333 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
4334 set_mem_alias_set (mem, sparc_sr_alias_set);
4335 if (action == SORR_SAVE)
4336 {
4337 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4338 RTX_FRAME_RELATED_P (insn) = 1;
4339 }
4340 else /* action == SORR_RESTORE */
4341 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4342 offset += 8;
4343 }
4344 }
4345 }
4346 else
4347 {
4348 for (i = low; i < high; i += 2)
4349 {
4350 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
4351 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
4352 enum machine_mode mode;
4353 int regno;
4354
4355 if (reg0 && reg1)
4356 {
4357 mode = i < 32 ? DImode : DFmode;
4358 regno = i;
4359 }
4360 else if (reg0)
4361 {
4362 mode = i < 32 ? SImode : SFmode;
4363 regno = i;
4364 }
4365 else if (reg1)
4366 {
4367 mode = i < 32 ? SImode : SFmode;
4368 regno = i + 1;
4369 offset += 4;
4370 }
4371 else
4372 continue;
4373
4374 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
4375 set_mem_alias_set (mem, sparc_sr_alias_set);
4376 if (action == SORR_SAVE)
4377 {
4378 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4379 RTX_FRAME_RELATED_P (insn) = 1;
4380 }
4381 else /* action == SORR_RESTORE */
4382 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4383
4384 /* Always preserve double-word alignment. */
4385 offset = (offset + 7) & -8;
4386 }
4387 }
4388
4389 return offset;
4390 }
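
/* Example of the pairing logic above: when saving the FP range, if %f8
   and %f9 are both live the pair is stored with a single DFmode (std)
   access; if only %f9 is live it is stored alone in SFmode at
   offset + 4, and the offset is then rounded back up to a multiple of 8
   to preserve double-word alignment.  */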
4391
4392 /* Emit code to save call-saved registers. */
4393
4394 static void
4395 emit_save_regs (void)
4396 {
4397 HOST_WIDE_INT offset;
4398 rtx base;
4399
4400 offset = frame_base_offset - apparent_fsize;
4401
4402 if (offset < -4096 || offset + num_gfregs * 4 > 4096)
4403 {
4404 /* ??? This might be optimized a little as %g1 might already have a
4405 value close enough that a single add insn will do. */
4406 /* ??? Although, all of this is probably only a temporary fix
4407 because if %g1 can hold a function result, then
4408 sparc_expand_epilogue will lose (the result will be
4409 clobbered). */
4410 base = gen_rtx_REG (Pmode, 1);
4411 emit_move_insn (base, GEN_INT (offset));
4412 emit_insn (gen_rtx_SET (VOIDmode,
4413 base,
4414 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4415 offset = 0;
4416 }
4417 else
4418 base = frame_base_reg;
4419
4420 offset = save_or_restore_regs (0, 8, base, offset, SORR_SAVE);
4421 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_SAVE);
4422 }
4423
4424 /* Emit code to restore call-saved registers. */
4425
4426 static void
4427 emit_restore_regs (void)
4428 {
4429 HOST_WIDE_INT offset;
4430 rtx base;
4431
4432 offset = frame_base_offset - apparent_fsize;
4433
4434 if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
4435 {
4436 base = gen_rtx_REG (Pmode, 1);
4437 emit_move_insn (base, GEN_INT (offset));
4438 emit_insn (gen_rtx_SET (VOIDmode,
4439 base,
4440 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4441 offset = 0;
4442 }
4443 else
4444 base = frame_base_reg;
4445
4446 offset = save_or_restore_regs (0, 8, base, offset, SORR_RESTORE);
4447 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, SORR_RESTORE);
4448 }
4449
4450 /* Emit an increment for the stack pointer. */
4451
4452 static void
4453 emit_stack_pointer_increment (rtx increment)
4454 {
4455 if (TARGET_ARCH64)
4456 emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4457 else
4458 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, increment));
4459 }
4460
4461 /* Emit a decrement for the stack pointer. */
4462
4463 static void
4464 emit_stack_pointer_decrement (rtx decrement)
4465 {
4466 if (TARGET_ARCH64)
4467 emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4468 else
4469 emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, decrement));
4470 }
4471
4472 /* Expand the function prologue. The prologue is responsible for reserving
4473 storage for the frame, saving the call-saved registers and loading the
4474 PIC register if needed. */
4475
4476 void
4477 sparc_expand_prologue (void)
4478 {
4479 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4480 on the final value of the flag means deferring the prologue/epilogue
4481 expansion until just before the second scheduling pass, which is too
4482 late to emit multiple epilogues or return insns.
4483
4484 Of course we are making the assumption that the value of the flag
4485 will not change between now and its final value. Of the three parts
4486 of the formula, only the last one can reasonably vary. Let's take a
4487 closer look, after assuming that the first two ones are set to true
4488 (otherwise the last value is effectively silenced).
4489
4490 If only_leaf_regs_used returns false, the global predicate will also
4491 be false so the actual frame size calculated below will be positive.
4492 As a consequence, the save_register_window insn will be emitted in
4493 the instruction stream; now this insn explicitly references %fp
4494 which is not a leaf register so only_leaf_regs_used will always
4495 return false subsequently.
4496
4497 If only_leaf_regs_used returns true, we hope that the subsequent
4498 optimization passes won't cause non-leaf registers to pop up. For
4499 example, the regrename pass has special provisions to not rename to
4500 non-leaf registers in a leaf function. */
4501 sparc_leaf_function_p
4502 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4503
4504 /* Need to use actual_fsize, since we are also allocating
4505 space for our callee (and our own register save area). */
4506 actual_fsize
4507 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4508
4509 /* Advertise that the data calculated just above are now valid. */
4510 sparc_prologue_data_valid_p = true;
4511
4512 if (sparc_leaf_function_p)
4513 {
4514 frame_base_reg = stack_pointer_rtx;
4515 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4516 }
4517 else
4518 {
4519 frame_base_reg = hard_frame_pointer_rtx;
4520 frame_base_offset = SPARC_STACK_BIAS;
4521 }
4522
4523 if (actual_fsize == 0)
4524 /* do nothing. */ ;
4525 else if (sparc_leaf_function_p)
4526 {
4527 if (actual_fsize <= 4096)
4528 emit_stack_pointer_increment (GEN_INT (- actual_fsize));
4529 else if (actual_fsize <= 8192)
4530 {
4531 emit_stack_pointer_increment (GEN_INT (-4096));
4532 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4533 }
4534 else
4535 {
4536 rtx reg = gen_rtx_REG (Pmode, 1);
4537 emit_move_insn (reg, GEN_INT (-actual_fsize));
4538 emit_stack_pointer_increment (reg);
4539 }
4540 }
4541 else
4542 {
4543 if (actual_fsize <= 4096)
4544 emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4545 else if (actual_fsize <= 8192)
4546 {
4547 emit_insn (gen_save_register_window (GEN_INT (-4096)));
4548 emit_stack_pointer_increment (GEN_INT (4096 - actual_fsize));
4549 }
4550 else
4551 {
4552 rtx reg = gen_rtx_REG (Pmode, 1);
4553 emit_move_insn (reg, GEN_INT (-actual_fsize));
4554 emit_insn (gen_save_register_window (reg));
4555 }
4556 }
4557
4558 /* Call-saved registers are saved just above the outgoing argument area. */
4559 if (num_gfregs)
4560 emit_save_regs ();
4561
4562 /* Load the PIC register if needed. */
4563 if (flag_pic && current_function_uses_pic_offset_table)
4564 load_pic_register ();
4565 }
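
/* The frame allocation emitted above, sketched as assembly for the
   non-leaf case (the leaf case uses plain adds on %sp instead):

	save	%sp, -actual_fsize, %sp		! actual_fsize <= 4096

	save	%sp, -4096, %sp			! actual_fsize <= 8192
	add	%sp, 4096-actual_fsize, %sp

	sethi	%hi(-actual_fsize), %g1		! larger frames
	or	%g1, %lo(-actual_fsize), %g1
	save	%sp, %g1, %sp  */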
4566
4567 /* This function generates the assembly code for function entry, which boils
4568 down to emitting the necessary .register directives. It also informs the
4569 DWARF-2 back-end on the layout of the frame.
4570
4571 ??? Historical cruft: "On SPARC, move-double insns between fpu and cpu need
4572 an 8-byte block of memory. If any fpu reg is used in the function, we
4573 allocate such a block here, at the bottom of the frame, just in case it's
4574 needed." Could this explain the -8 in emit_restore_regs? */
4575
4576 static void
4577 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4578 {
4579 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4580 if (sparc_leaf_function_p != current_function_uses_only_leaf_regs)
4581     abort ();
4582
4583 sparc_output_scratch_registers (file);
4584
4585 if (dwarf2out_do_frame () && actual_fsize)
4586 {
4587 char *label = dwarf2out_cfi_label ();
4588
4589 /* The canonical frame address refers to the top of the frame. */
4590 dwarf2out_def_cfa (label,
4591 sparc_leaf_function_p
4592 ? STACK_POINTER_REGNUM
4593 : HARD_FRAME_POINTER_REGNUM,
4594 frame_base_offset);
4595
4596 if (! sparc_leaf_function_p)
4597 {
4598 /* Note the register window save. This tells the unwinder that
4599 it needs to restore the window registers from the previous
4600 frame's window save area at 0(cfa). */
4601 dwarf2out_window_save (label);
4602
4603 /* The return address (-8) is now in %i7. */
4604 dwarf2out_return_reg (label, 31);
4605 }
4606 }
4607 }
4608
4609 /* Expand the function epilogue, either normal or part of a sibcall.
4610 We emit all the instructions except the return or the call. */
4611
4612 void
4613 sparc_expand_epilogue (void)
4614 {
4615 if (num_gfregs)
4616 emit_restore_regs ();
4617
4618 if (actual_fsize == 0)
4619 /* do nothing. */ ;
4620 else if (sparc_leaf_function_p)
4621 {
4622 if (actual_fsize <= 4096)
4623 emit_stack_pointer_decrement (GEN_INT (- actual_fsize));
4624 else if (actual_fsize <= 8192)
4625 {
4626 emit_stack_pointer_decrement (GEN_INT (-4096));
4627 emit_stack_pointer_decrement (GEN_INT (4096 - actual_fsize));
4628 }
4629 else
4630 {
4631 rtx reg = gen_rtx_REG (Pmode, 1);
4632 emit_move_insn (reg, GEN_INT (-actual_fsize));
4633 emit_stack_pointer_decrement (reg);
4634 }
4635 }
4636 }
4637
4638 /* Return true if it is appropriate to emit `return' instructions in the
4639 body of a function. */
4640
4641 bool
4642 sparc_can_use_return_insn_p (void)
4643 {
4644 return sparc_prologue_data_valid_p
4645 && (actual_fsize == 0 || !sparc_leaf_function_p);
4646 }
4647
4648 /* This function generates the assembly code for function exit. */
4649
4650 static void
4651 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4652 {
4653   /* If code does not drop into the epilogue, we still have to output
4654 a dummy nop for the sake of sane backtraces. Otherwise, if the
4655 last two instructions of a function were "call foo; dslot;" this
4656 can make the return PC of foo (i.e. address of call instruction
4657 plus 8) point to the first instruction in the next function. */
4658
4659 rtx insn, last_real_insn;
4660
4661 insn = get_last_insn ();
4662
4663 last_real_insn = prev_real_insn (insn);
4664 if (last_real_insn
4665 && GET_CODE (last_real_insn) == INSN
4666 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4667 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4668
4669 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4670 fputs("\tnop\n", file);
4671
4672 sparc_output_deferred_case_vectors ();
4673 }
4674
4675 /* Output a 'restore' instruction. */
4676
4677 static void
4678 output_restore (rtx pat)
4679 {
4680 rtx operands[3];
4681
4682 if (! pat)
4683 {
4684 fputs ("\t restore\n", asm_out_file);
4685 return;
4686 }
4687
4688 if (GET_CODE (pat) != SET)
4689 abort ();
4690
4691 operands[0] = SET_DEST (pat);
4692 pat = SET_SRC (pat);
4693
4694 switch (GET_CODE (pat))
4695 {
4696 case PLUS:
4697 operands[1] = XEXP (pat, 0);
4698 operands[2] = XEXP (pat, 1);
4699 output_asm_insn (" restore %r1, %2, %Y0", operands);
4700 break;
4701 case LO_SUM:
4702 operands[1] = XEXP (pat, 0);
4703 operands[2] = XEXP (pat, 1);
4704 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4705 break;
4706 case ASHIFT:
4707 operands[1] = XEXP (pat, 0);
4708 if (XEXP (pat, 1) != const1_rtx)
4709 	abort ();
4710 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4711 break;
4712 default:
4713 operands[1] = pat;
4714 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4715 break;
4716 }
4717 }
4718
4719 /* Output a return. */
4720
4721 const char *
4722 output_return (rtx insn)
4723 {
4724 if (sparc_leaf_function_p)
4725 {
4726 /* This is a leaf function so we don't have to bother restoring the
4727 register window, which frees us from dealing with the convoluted
4728 semantics of restore/return. We simply output the jump to the
4729 return address and the insn in the delay slot (if any). */
4730
4731 if (current_function_calls_eh_return)
4732 abort ();
4733
4734 return "jmp\t%%o7+%)%#";
4735 }
4736 else
4737 {
4738 /* This is a regular function so we have to restore the register window.
4739 We may have a pending insn for the delay slot, which will be either
4740 combined with the 'restore' instruction or put in the delay slot of
4741 the 'return' instruction. */
4742
4743 if (current_function_calls_eh_return)
4744 {
4745 /* If the function uses __builtin_eh_return, the eh_return
4746 machinery occupies the delay slot. */
4747 if (final_sequence)
4748 abort ();
4749
4750 if (! flag_delayed_branch)
4751 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4752
4753 if (TARGET_V9)
4754 fputs ("\treturn\t%i7+8\n", asm_out_file);
4755 else
4756 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4757
4758 if (flag_delayed_branch)
4759 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4760 else
4761 fputs ("\t nop\n", asm_out_file);
4762 }
4763 else if (final_sequence)
4764 {
4765 rtx delay, pat;
4766
4767 delay = NEXT_INSN (insn);
4768 if (! delay)
4769 abort ();
4770
4771 pat = PATTERN (delay);
4772
4773 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4774 {
4775 epilogue_renumber (&pat, 0);
4776 return "return\t%%i7+%)%#";
4777 }
4778 else
4779 {
4780 output_asm_insn ("jmp\t%%i7+%)", NULL);
4781 output_restore (pat);
4782 PATTERN (delay) = gen_blockage ();
4783 INSN_CODE (delay) = -1;
4784 }
4785 }
4786 else
4787 {
4788 /* The delay slot is empty. */
4789 if (TARGET_V9)
4790 return "return\t%%i7+%)\n\t nop";
4791 else if (flag_delayed_branch)
4792 return "jmp\t%%i7+%)\n\t restore";
4793 else
4794 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4795 }
4796 }
4797
4798 return "";
4799 }
4800
4801 /* Output a sibling call. */
4802
4803 const char *
4804 output_sibcall (rtx insn, rtx call_operand)
4805 {
4806 rtx operands[1];
4807
4808 if (! flag_delayed_branch)
4809     abort ();
4810
4811 operands[0] = call_operand;
4812
4813 if (sparc_leaf_function_p)
4814 {
4815 /* This is a leaf function so we don't have to bother restoring the
4816 register window. We simply output the jump to the function and
4817 the insn in the delay slot (if any). */
4818
4819 if (LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence)
4820       abort ();
4821
4822 if (final_sequence)
4823 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4824 operands);
4825 else
4826 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4827 	   it into a branch if possible.  */
4828 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4829 operands);
4830 }
4831 else
4832 {
4833 /* This is a regular function so we have to restore the register window.
4834 We may have a pending insn for the delay slot, which will be combined
4835 with the 'restore' instruction. */
4836
4837 output_asm_insn ("call\t%a0, 0", operands);
4838
4839 if (final_sequence)
4840 {
4841 rtx delay = NEXT_INSN (insn);
4842 if (! delay)
4843 abort ();
4844
4845 output_restore (PATTERN (delay));
4846
4847 PATTERN (delay) = gen_blockage ();
4848 INSN_CODE (delay) = -1;
4849 }
4850 else
4851 output_restore (NULL_RTX);
4852 }
4853
4854 return "";
4855 }
4856 \f
4857 /* Functions for handling argument passing.
4858
4859 For 32-bit, the first 6 args are normally in registers and the rest are
4860 pushed. Any arg that starts within the first 6 words is at least
4861    partially passed in a register unless its data type forbids it.
4862
4863 For 64-bit, the argument registers are laid out as an array of 16 elements
4864 and arguments are added sequentially. The first 6 int args and up to the
4865 first 16 fp args (depending on size) are passed in regs.
4866
4867 Slot Stack Integral Float Float in structure Double Long Double
4868 ---- ----- -------- ----- ------------------ ------ -----------
4869 15 [SP+248] %f31 %f30,%f31 %d30
4870 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4871 13 [SP+232] %f27 %f26,%f27 %d26
4872 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4873 11 [SP+216] %f23 %f22,%f23 %d22
4874 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4875 9 [SP+200] %f19 %f18,%f19 %d18
4876 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4877 7 [SP+184] %f15 %f14,%f15 %d14
4878 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4879 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4880 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4881 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4882 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4883 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4884 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4885
4886 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4887
4888 Integral arguments are always passed as 64-bit quantities appropriately
4889 extended.
4890
4891 Passing of floating point values is handled as follows.
4892 If a prototype is in scope:
4893 If the value is in a named argument (i.e. not a stdarg function or a
4894 value not part of the `...') then the value is passed in the appropriate
4895 fp reg.
4896 If the value is part of the `...' and is passed in one of the first 6
4897 slots then the value is passed in the appropriate int reg.
4898 If the value is part of the `...' and is not passed in one of the first 6
4899 slots then the value is passed in memory.
4900 If a prototype is not in scope:
4901 If the value is one of the first 6 arguments the value is passed in the
4902 appropriate integer reg and the appropriate fp reg.
4903 If the value is not one of the first 6 arguments the value is passed in
4904 the appropriate fp reg and in memory.
4905
4906
4907 Summary of the calling conventions implemented by GCC on SPARC:
4908
4909 32-bit ABI:
4910 size argument return value
4911
4912 small integer <4 int. reg. int. reg.
4913 word 4 int. reg. int. reg.
4914 double word 8 int. reg. int. reg.
4915
4916 _Complex small integer <8 int. reg. int. reg.
4917 _Complex word 8 int. reg. int. reg.
4918 _Complex double word 16 memory int. reg.
4919
4920 vector integer <=8 int. reg. FP reg.
4921 vector integer >8 memory memory
4922
4923 float 4 int. reg. FP reg.
4924 double 8 int. reg. FP reg.
4925 long double 16 memory memory
4926
4927 _Complex float 8 memory FP reg.
4928 _Complex double 16 memory FP reg.
4929 _Complex long double 32 memory FP reg.
4930
4931 vector float any memory memory
4932
4933 aggregate any memory memory
4934
4935
4936
4937 64-bit ABI:
4938 size argument return value
4939
4940 small integer <8 int. reg. int. reg.
4941 word 8 int. reg. int. reg.
4942 double word 16 int. reg. int. reg.
4943
4944 _Complex small integer <16 int. reg. int. reg.
4945 _Complex word 16 int. reg. int. reg.
4946 _Complex double word 32 memory int. reg.
4947
4948 vector integer <=16 FP reg. FP reg.
4949 vector integer 16<s<=32 memory FP reg.
4950 vector integer >32 memory memory
4951
4952 float 4 FP reg. FP reg.
4953 double 8 FP reg. FP reg.
4954 long double 16 FP reg. FP reg.
4955
4956 _Complex float 8 FP reg. FP reg.
4957 _Complex double 16 FP reg. FP reg.
4958 _Complex long double 32 memory FP reg.
4959
4960 vector float <=16 FP reg. FP reg.
4961 vector float 16<s<=32 memory FP reg.
4962 vector float >32 memory memory
4963
4964 aggregate <=16 reg. reg.
4965 aggregate 16<s<=32 memory reg.
4966 aggregate >32 memory memory
4967
4968
4969
4970 Note #1: complex floating-point types follow the extended SPARC ABIs as
4971 implemented by the Sun compiler.
4972
4973 Note #2: integral vector types follow the scalar floating-point types
4974 conventions to match what is implemented by the Sun VIS SDK.
4975
4976 Note #3: floating-point vector types follow the aggregate types
4977 conventions. */
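
/* Worked example (a sketch of the conventions above): for a prototyped
   `double f (int a, double b)', the 32-bit ABI passes A in %o0 and B in
   the integer pair %o1/%o2, returning in %f0/%f1; the 64-bit ABI passes
   A in %o0 (slot 0) and B in %d2 (slot 1), returning in %d0.  */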
4978
4979
4980 /* Maximum number of int regs for args. */
4981 #define SPARC_INT_ARG_MAX 6
4982 /* Maximum number of fp regs for args. */
4983 #define SPARC_FP_ARG_MAX 16
4984
4985 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
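
/* e.g. with UNITS_PER_WORD == 8 (64-bit), ROUND_ADVANCE (1) through
   ROUND_ADVANCE (8) all yield 1 slot and ROUND_ADVANCE (9) yields 2;
   with UNITS_PER_WORD == 4 a `double' takes ROUND_ADVANCE (8) == 2.  */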
4986
4987 /* Handle the INIT_CUMULATIVE_ARGS macro.
4988 Initialize a variable CUM of type CUMULATIVE_ARGS
4989 for a call to a function whose data type is FNTYPE.
4990 For a library call, FNTYPE is 0. */
4991
4992 void
4993 init_cumulative_args (struct sparc_args *cum, tree fntype,
4994 rtx libname ATTRIBUTE_UNUSED,
4995 tree fndecl ATTRIBUTE_UNUSED)
4996 {
4997 cum->words = 0;
4998 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4999 cum->libcall_p = fntype == 0;
5000 }
5001
5002 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
5003 When a prototype says `char' or `short', really pass an `int'. */
5004
5005 static bool
5006 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
5007 {
5008 return TARGET_ARCH32 ? true : false;
5009 }
5010
5011 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5012
5013 static bool
5014 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
5015 {
5016 return TARGET_ARCH64 ? true : false;
5017 }
5018
5019 /* Scan the record type TYPE and return the following predicates:
5020 - INTREGS_P: the record contains at least one field or sub-field
5021 that is eligible for promotion in integer registers.
5022 - FP_REGS_P: the record contains at least one field or sub-field
5023 that is eligible for promotion in floating-point registers.
5024 - PACKED_P: the record contains at least one field that is packed.
5025
5026 Sub-fields are not taken into account for the PACKED_P predicate. */
5027
5028 static void
5029 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
5030 {
5031 tree field;
5032
5033 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5034 {
5035 if (TREE_CODE (field) == FIELD_DECL)
5036 {
5037 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5038 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5039 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5040 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5041 && TARGET_FPU)
5042 *fpregs_p = 1;
5043 else
5044 *intregs_p = 1;
5045
5046 if (packed_p && DECL_PACKED (field))
5047 *packed_p = 1;
5048 }
5049 }
5050 }
5051
5052 /* Compute the slot number to pass an argument in.
5053 Return the slot number or -1 if passing on the stack.
5054
5055 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5056 the preceding args and about the function being called.
5057 MODE is the argument's machine mode.
5058 TYPE is the data type of the argument (as a tree).
5059 This is null for libcalls where that information may
5060 not be available.
5061 NAMED is nonzero if this argument is a named parameter
5062 (otherwise it is an extra parameter matching an ellipsis).
5063 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5064 *PREGNO records the register number to use if scalar type.
5065 *PPADDING records the amount of padding needed in words. */
5066
5067 static int
5068 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5069 tree type, int named, int incoming_p,
5070 int *pregno, int *ppadding)
5071 {
5072 int regbase = (incoming_p
5073 ? SPARC_INCOMING_INT_ARG_FIRST
5074 : SPARC_OUTGOING_INT_ARG_FIRST);
5075 int slotno = cum->words;
5076 enum mode_class mclass;
5077 int regno;
5078
5079 *ppadding = 0;
5080
5081 if (type && TREE_ADDRESSABLE (type))
5082 return -1;
5083
5084 if (TARGET_ARCH32
5085 && mode == BLKmode
5086 && type
5087 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5088 return -1;
5089
5090 /* For SPARC64, objects requiring 16-byte alignment get it. */
5091 if (TARGET_ARCH64
5092 && GET_MODE_ALIGNMENT (mode) >= 2 * BITS_PER_WORD
5093 && (slotno & 1) != 0)
5094 slotno++, *ppadding = 1;
5095
5096 mclass = GET_MODE_CLASS (mode);
5097 if (type && TREE_CODE (type) == VECTOR_TYPE)
5098 {
5099 /* Vector types deserve special treatment because they are
5100 polymorphic wrt their mode, depending upon whether VIS
5101 instructions are enabled. */
5102 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5103 {
5104 /* The SPARC port defines no floating-point vector modes. */
5105 if (mode != BLKmode)
5106 abort ();
5107 }
5108 else
5109 {
5110 /* Integral vector types should either have a vector
5111 mode or an integral mode, because we are guaranteed
5112 by pass_by_reference that their size is not greater
5113 than 16 bytes and TImode is 16-byte wide. */
5114 if (mode == BLKmode)
5115 abort ();
5116
5117 /* Vector integers are handled like floats according to
5118 the Sun VIS SDK. */
5119 mclass = MODE_FLOAT;
5120 }
5121 }
5122
5123 switch (mclass)
5124 {
5125 case MODE_FLOAT:
5126 case MODE_COMPLEX_FLOAT:
5127 if (TARGET_ARCH64 && TARGET_FPU && named)
5128 {
5129 if (slotno >= SPARC_FP_ARG_MAX)
5130 return -1;
5131 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5132 /* Arguments filling only one single FP register are
5133 right-justified in the outer double FP register. */
5134 if (GET_MODE_SIZE (mode) <= 4)
5135 regno++;
5136 break;
5137 }
5138 /* fallthrough */
5139
5140 case MODE_INT:
5141 case MODE_COMPLEX_INT:
5142 if (slotno >= SPARC_INT_ARG_MAX)
5143 return -1;
5144 regno = regbase + slotno;
5145 break;
5146
5147 case MODE_RANDOM:
5148 if (mode == VOIDmode)
5149 /* MODE is VOIDmode when generating the actual call. */
5150 return -1;
5151
5152 if (mode != BLKmode)
5153 abort ();
5154
5155 /* For SPARC64, objects requiring 16-byte alignment get it. */
5156 if (TARGET_ARCH64
5157 && type
5158 && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
5159 && (slotno & 1) != 0)
5160 slotno++, *ppadding = 1;
5161
5162 if (TARGET_ARCH32 || !type || (TREE_CODE (type) == UNION_TYPE))
5163 {
5164 if (slotno >= SPARC_INT_ARG_MAX)
5165 return -1;
5166 regno = regbase + slotno;
5167 }
5168 else /* TARGET_ARCH64 && type */
5169 {
5170 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5171
5172 /* First see what kinds of registers we would need. */
5173 if (TREE_CODE (type) == VECTOR_TYPE)
5174 fpregs_p = 1;
5175 else
5176 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5177
5178 /* The ABI obviously doesn't specify how packed structures
5179 are passed. These are defined to be passed in int regs
5180 if possible, otherwise memory. */
5181 if (packed_p || !named)
5182 fpregs_p = 0, intregs_p = 1;
5183
5184 /* If all arg slots are filled, then must pass on stack. */
5185 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5186 return -1;
5187
5188 /* If there are only int args and all int arg slots are filled,
5189 then must pass on stack. */
5190 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5191 return -1;
5192
5193 /* Note that even if all int arg slots are filled, fp members may
5194 still be passed in regs if such regs are available.
5195 *PREGNO isn't set because there may be more than one, it's up
5196 to the caller to compute them. */
5197 return slotno;
5198 }
5199 break;
5200
5201 default :
5202 abort ();
5203 }
5204
5205 *pregno = regno;
5206 return slotno;
5207 }
5208
5209 /* Handle recursive register counting for structure field layout. */
5210
5211 struct function_arg_record_value_parms
5212 {
5213 rtx ret; /* return expression being built. */
5214 int slotno; /* slot number of the argument. */
5215 int named; /* whether the argument is named. */
5216 int regbase; /* regno of the base register. */
5217 int stack; /* 1 if part of the argument is on the stack. */
5218 int intoffset; /* offset of the first pending integer field. */
5219 unsigned int nregs; /* number of words passed in registers. */
5220 };
5221
5222 static void function_arg_record_value_3
5223 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5224 static void function_arg_record_value_2
5225 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5226 static void function_arg_record_value_1
5227 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5228 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
5229 static rtx function_arg_union_value (int, enum machine_mode, int);
5230
5231 /* A subroutine of function_arg_record_value. Traverse the structure
5232 recursively and determine how many registers will be required. */
5233
5234 static void
5235 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
5236 struct function_arg_record_value_parms *parms,
5237 bool packed_p)
5238 {
5239 tree field;
5240
5241 /* We need to compute how many registers are needed so we can
5242 allocate the PARALLEL but before we can do that we need to know
5243 whether there are any packed fields. The ABI obviously doesn't
5244 specify how structures are passed in this case, so they are
5245 defined to be passed in int regs if possible, otherwise memory,
5246 regardless of whether there are fp values present. */
5247
5248 if (! packed_p)
5249 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5250 {
5251 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5252 {
5253 packed_p = true;
5254 break;
5255 }
5256 }
5257
5258 /* Compute how many registers we need. */
5259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5260 {
5261 if (TREE_CODE (field) == FIELD_DECL)
5262 {
5263 HOST_WIDE_INT bitpos = startbitpos;
5264
5265 if (DECL_SIZE (field) != 0)
5266 {
5267 if (integer_zerop (DECL_SIZE (field)))
5268 continue;
5269
5270 if (host_integerp (bit_position (field), 1))
5271 bitpos += int_bit_position (field);
5272 }
5273
5274 /* ??? FIXME: else assume zero offset. */
5275
5276 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5277 function_arg_record_value_1 (TREE_TYPE (field),
5278 bitpos,
5279 parms,
5280 packed_p);
5281 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5282 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5283 && TARGET_FPU
5284 && parms->named
5285 && ! packed_p)
5286 {
5287 if (parms->intoffset != -1)
5288 {
5289 unsigned int startbit, endbit;
5290 int intslots, this_slotno;
5291
5292 startbit = parms->intoffset & -BITS_PER_WORD;
5293 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5294
5295 intslots = (endbit - startbit) / BITS_PER_WORD;
5296 this_slotno = parms->slotno + parms->intoffset
5297 / BITS_PER_WORD;
5298
5299 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5300 {
5301 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5302 /* We need to pass this field on the stack. */
5303 parms->stack = 1;
5304 }
5305
5306 parms->nregs += intslots;
5307 parms->intoffset = -1;
5308 }
5309
5310 		  /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5311 If it wasn't true we wouldn't be here. */
5312 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5313 && DECL_MODE (field) == BLKmode)
5314 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5315 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5316 parms->nregs += 2;
5317 else
5318 parms->nregs += 1;
5319 }
5320 else
5321 {
5322 if (parms->intoffset == -1)
5323 parms->intoffset = bitpos;
5324 }
5325 }
5326 }
5327 }
5328
5329 /* A subroutine of function_arg_record_value. Assign the bits of the
5330 structure between parms->intoffset and bitpos to integer registers. */
5331
5332 static void
5333 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5334 struct function_arg_record_value_parms *parms)
5335 {
5336 enum machine_mode mode;
5337 unsigned int regno;
5338 unsigned int startbit, endbit;
5339 int this_slotno, intslots, intoffset;
5340 rtx reg;
5341
5342 if (parms->intoffset == -1)
5343 return;
5344
5345 intoffset = parms->intoffset;
5346 parms->intoffset = -1;
5347
5348 startbit = intoffset & -BITS_PER_WORD;
5349 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5350 intslots = (endbit - startbit) / BITS_PER_WORD;
5351 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5352
5353 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5354 if (intslots <= 0)
5355 return;
5356
5357 /* If this is the trailing part of a word, only load that much into
5358 the register. Otherwise load the whole register. Note that in
5359 the latter case we may pick up unwanted bits. It's not a problem
5360      at the moment but we may wish to revisit this.  */
5361
5362 if (intoffset % BITS_PER_WORD != 0)
5363 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5364 MODE_INT);
5365 else
5366 mode = word_mode;
5367
5368 intoffset /= BITS_PER_UNIT;
5369 do
5370 {
5371 regno = parms->regbase + this_slotno;
5372 reg = gen_rtx_REG (mode, regno);
5373 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5374 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5375
5376 this_slotno += 1;
5377 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5378 mode = word_mode;
5379 parms->nregs += 1;
5380 intslots -= 1;
5381 }
5382 while (intslots > 0);
5383 }
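
/* Illustration: on 64-bit, an integer field starting at bit 32 of its
   word gets a SImode register first (the trailing part of the word),
   and any following full words are then assigned word_mode registers.  */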
5384
5385 /* A subroutine of function_arg_record_value. Traverse the structure
5386 recursively and assign bits to floating point registers. Track which
5387 bits in between need integer registers; invoke function_arg_record_value_3
5388 to make that happen. */
5389
5390 static void
5391 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
5392 struct function_arg_record_value_parms *parms,
5393 bool packed_p)
5394 {
5395 tree field;
5396
5397 if (! packed_p)
5398 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5399 {
5400 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5401 {
5402 packed_p = true;
5403 break;
5404 }
5405 }
5406
5407 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5408 {
5409 if (TREE_CODE (field) == FIELD_DECL)
5410 {
5411 HOST_WIDE_INT bitpos = startbitpos;
5412
5413 if (DECL_SIZE (field) != 0)
5414 {
5415 if (integer_zerop (DECL_SIZE (field)))
5416 continue;
5417
5418 if (host_integerp (bit_position (field), 1))
5419 bitpos += int_bit_position (field);
5420 }
5421
5422 /* ??? FIXME: else assume zero offset. */
5423
5424 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5425 function_arg_record_value_2 (TREE_TYPE (field),
5426 bitpos,
5427 parms,
5428 packed_p);
5429 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5430 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5431 && TARGET_FPU
5432 && parms->named
5433 && ! packed_p)
5434 {
5435 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5436 int regno, nregs, pos;
5437 enum machine_mode mode = DECL_MODE (field);
5438 rtx reg;
5439
5440 function_arg_record_value_3 (bitpos, parms);
5441
5442 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5443 && mode == BLKmode)
5444 {
5445 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5446 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5447 }
5448 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5449 {
5450 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5451 nregs = 2;
5452 }
5453 else
5454 nregs = 1;
5455
5456 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5457 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5458 regno++;
5459 reg = gen_rtx_REG (mode, regno);
5460 pos = bitpos / BITS_PER_UNIT;
5461 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5462 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5463 parms->nregs += 1;
5464 while (--nregs > 0)
5465 {
5466 regno += GET_MODE_SIZE (mode) / 4;
5467 reg = gen_rtx_REG (mode, regno);
5468 pos += GET_MODE_SIZE (mode);
5469 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5470 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5471 parms->nregs += 1;
5472 }
5473 }
5474 else
5475 {
5476 if (parms->intoffset == -1)
5477 parms->intoffset = bitpos;
5478 }
5479 }
5480 }
5481 }
5482
5483 /* Used by function_arg and function_value to implement the complex
5484 conventions of the 64-bit ABI for passing and returning structures.
5485 Return an expression valid as a return value for the two macros
5486 FUNCTION_ARG and FUNCTION_VALUE.
5487
5488 TYPE is the data type of the argument (as a tree).
5489 This is null for libcalls where that information may
5490 not be available.
5491 MODE is the argument's machine mode.
5492 SLOTNO is the index number of the argument's slot in the parameter array.
5493 NAMED is nonzero if this argument is a named parameter
5494 (otherwise it is an extra parameter matching an ellipsis).
5495 REGBASE is the regno of the base register for the parameter array. */
5496
5497 static rtx
5498 function_arg_record_value (tree type, enum machine_mode mode,
5499 int slotno, int named, int regbase)
5500 {
5501 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5502 struct function_arg_record_value_parms parms;
5503 unsigned int nregs;
5504
5505 parms.ret = NULL_RTX;
5506 parms.slotno = slotno;
5507 parms.named = named;
5508 parms.regbase = regbase;
5509 parms.stack = 0;
5510
5511 /* Compute how many registers we need. */
5512 parms.nregs = 0;
5513 parms.intoffset = 0;
5514 function_arg_record_value_1 (type, 0, &parms, false);
5515
5516 /* Take into account pending integer fields. */
5517 if (parms.intoffset != -1)
5518 {
5519 unsigned int startbit, endbit;
5520 int intslots, this_slotno;
5521
5522 startbit = parms.intoffset & -BITS_PER_WORD;
5523 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5524 intslots = (endbit - startbit) / BITS_PER_WORD;
5525 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5526
5527 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5528 {
5529 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5530 /* We need to pass this field on the stack. */
5531 parms.stack = 1;
5532 }
5533
5534 parms.nregs += intslots;
5535 }
5536 nregs = parms.nregs;
5537
5538 /* Allocate the vector and handle some annoying special cases. */
5539 if (nregs == 0)
5540 {
5541 /* ??? Empty structure has no value? Duh? */
5542 if (typesize <= 0)
5543 {
5544 /* Though there's nothing really to store, return a word register
5545 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5546 leads to breakage due to the fact that there are zero bytes to
5547 load. */
5548 return gen_rtx_REG (mode, regbase);
5549 }
5550 else
5551 {
5552 /* ??? C++ has structures with no fields, and yet a size. Give up
5553 for now and pass everything back in integer registers. */
5554 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5555 }
5556 if (nregs + slotno > SPARC_INT_ARG_MAX)
5557 nregs = SPARC_INT_ARG_MAX - slotno;
5558 }
5559 if (nregs == 0)
5560 abort ();
5561
5562 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5563
5564 /* If at least one field must be passed on the stack, generate
5565 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5566 also be passed on the stack. We can't do much better because the
5567 semantics of FUNCTION_ARG_PARTIAL_NREGS doesn't handle the case
5568 of structures for which the fields passed exclusively in registers
5569 are not at the beginning of the structure. */
5570 if (parms.stack)
5571 XVECEXP (parms.ret, 0, 0)
5572 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5573
5574 /* Fill in the entries. */
5575 parms.nregs = 0;
5576 parms.intoffset = 0;
5577 function_arg_record_value_2 (type, 0, &parms, false);
5578 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5579
5580 if (parms.nregs != nregs)
5581 abort ();
5582
5583 return parms.ret;
5584 }
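
/* Example of the result (a sketch): for `struct { int i; double d; }'
   passed in outgoing slots 0 and 1, the returned rtl is roughly

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DF %f2) (const_int 8))])

   i.e. the integer word goes in %o0 and the double in %d2.  */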
5585
5586 /* Used by function_arg and function_value to implement the conventions
5587 of the 64-bit ABI for passing and returning unions.
5588 Return an expression valid as a return value for the two macros
5589 FUNCTION_ARG and FUNCTION_VALUE.
5590
5591 SIZE is the size in bytes of the union.
5592 MODE is the argument's machine mode.
5593 REGNO is the hard register the union will be passed in. */
5594
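   /* For instance, a 12-byte union passed starting in %o0 on 64-bit targets
      occupies two word slots and yields
      (parallel [(expr_list (reg:DI %o0) (const_int 0))
                 (expr_list (reg:DI %o1) (const_int 8))]).  */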
5595 static rtx
5596 function_arg_union_value (int size, enum machine_mode mode, int regno)
5597 {
5598 int nwords = ROUND_ADVANCE (size), i;
5599 rtx regs;
5600
5601 /* See comment in previous function for empty structures. */
5602 if (nwords == 0)
5603 return gen_rtx_REG (mode, regno);
5604
5605 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5606
5607 for (i = 0; i < nwords; i++)
5608 {
5609 /* Unions are passed left-justified. */
5610 XVECEXP (regs, 0, i)
5611 = gen_rtx_EXPR_LIST (VOIDmode,
5612 gen_rtx_REG (word_mode, regno),
5613 GEN_INT (UNITS_PER_WORD * i));
5614 regno++;
5615 }
5616
5617 return regs;
5618 }
5619
5620 /* Used by function_arg and function_value to implement the conventions
5621 for passing and returning large (BLKmode) vectors.
5622 Return an expression valid as a return value for the two macros
5623 FUNCTION_ARG and FUNCTION_VALUE.
5624
5625 SIZE is the size in bytes of the vector.
5626 BASE_MODE is the argument's base machine mode.
5627 REGNO is the FP hard register the vector will be passed in. */
5628
5629 static rtx
5630 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5631 {
5632 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5633 int nregs = size / base_mode_size, i;
5634 rtx regs;
5635
5636 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5637
5638 for (i = 0; i < nregs; i++)
5639 {
5640 XVECEXP (regs, 0, i)
5641 = gen_rtx_EXPR_LIST (VOIDmode,
5642 gen_rtx_REG (base_mode, regno),
5643 GEN_INT (base_mode_size * i));
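      /* FP registers are numbered in 4-byte quantities, so step the
         register number by the element size divided by 4.  */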
5644 regno += base_mode_size / 4;
5645 }
5646
5647 return regs;
5648 }
5649
5650 /* Handle the FUNCTION_ARG macro.
5651 Determine where to put an argument to a function.
5652 Value is zero to push the argument on the stack,
5653 or a hard register in which to store the argument.
5654
5655 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5656 the preceding args and about the function being called.
5657 MODE is the argument's machine mode.
5658 TYPE is the data type of the argument (as a tree).
5659 This is null for libcalls where that information may
5660 not be available.
5661 NAMED is nonzero if this argument is a named parameter
5662 (otherwise it is an extra parameter matching an ellipsis).
5663 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5664
5665 rtx
5666 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5667 tree type, int named, int incoming_p)
5668 {
5669 int regbase = (incoming_p
5670 ? SPARC_INCOMING_INT_ARG_FIRST
5671 : SPARC_OUTGOING_INT_ARG_FIRST);
5672 int slotno, regno, padding;
5673 enum mode_class mclass = GET_MODE_CLASS (mode);
5674 rtx reg;
5675
5676 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5677 &regno, &padding);
5678
5679 if (slotno == -1)
5680 return 0;
5681
5682 if (TARGET_ARCH32)
5683 {
5684 reg = gen_rtx_REG (mode, regno);
5685 return reg;
5686 }
5687
5688 if (type && TREE_CODE (type) == RECORD_TYPE)
5689 {
5690 /* Structures up to 16 bytes in size are passed in arg slots on the
5691 stack and are promoted to registers where possible. */
5692
5693 if (int_size_in_bytes (type) > 16)
5694 abort (); /* shouldn't get here */
5695
5696 return function_arg_record_value (type, mode, slotno, named, regbase);
5697 }
5698 else if (type && TREE_CODE (type) == UNION_TYPE)
5699 {
5700 HOST_WIDE_INT size = int_size_in_bytes (type);
5701
5702 if (size > 16)
5703 abort (); /* shouldn't get here */
5704
5705 return function_arg_union_value (size, mode, regno);
5706 }
5707 else if (type && TREE_CODE (type) == VECTOR_TYPE)
5708 {
5709 /* Vector types deserve special treatment because they are
5710 polymorphic wrt their mode, depending upon whether VIS
5711 instructions are enabled. */
5712 HOST_WIDE_INT size = int_size_in_bytes (type);
5713
5714 if (size > 16)
5715 abort (); /* shouldn't get here */
5716
5717 if (mode == BLKmode)
5718 return function_arg_vector_value (size,
5719 TYPE_MODE (TREE_TYPE (type)),
5720 SPARC_FP_ARG_FIRST + 2*slotno);
5721 else
5722 mclass = MODE_FLOAT;
5723 }
5724
5725 /* V9 FP args in reg slots beyond the int reg slots get passed in regs,
5726 but also have the slot allocated for them.
5727 If no prototype is in scope, FP values in register slots get passed
5728 in two places: either FP regs and int regs, or FP regs and memory. */
5729 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5730 && SPARC_FP_REG_P (regno))
5731 {
5732 reg = gen_rtx_REG (mode, regno);
5733 if (cum->prototype_p || cum->libcall_p)
5734 {
5735 /* "* 2" because fp reg numbers are recorded in 4 byte
5736 quantities. */
5737 #if 0
5738 /* ??? This will cause the value to be passed in the fp reg and
5739 in the stack. When a prototype exists we want to pass the
5740 value in the reg but reserve space on the stack. That's an
5741 optimization, and is deferred [for a bit]. */
5742 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5743 return gen_rtx_PARALLEL (mode,
5744 gen_rtvec (2,
5745 gen_rtx_EXPR_LIST (VOIDmode,
5746 NULL_RTX, const0_rtx),
5747 gen_rtx_EXPR_LIST (VOIDmode,
5748 reg, const0_rtx)));
5749 else
5750 #else
5751 /* ??? It seems that passing back a register even when past
5752 the area declared by REG_PARM_STACK_SPACE will allocate
5753 space appropriately, and will not copy the data onto the
5754 stack, exactly as we desire.
5755
5756 This is due to locate_and_pad_parm being called in
5757 expand_call whenever reg_parm_stack_space > 0, which
5758 while beneficial to our example here, would seem to be
5759 in error from what had been intended. Ho hum... -- r~ */
5760 #endif
5761 return reg;
5762 }
5763 else
5764 {
5765 rtx v0, v1;
5766
5767 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5768 {
5769 int intreg;
5770
5771 /* On incoming, we don't need to know that the value
5772 is passed in %f0 and %i0; exposing that confuses other parts,
5773 causing needless spillage even in the simplest cases. */
5774 if (incoming_p)
5775 return reg;
5776
5777 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5778 + (regno - SPARC_FP_ARG_FIRST) / 2);
5779
5780 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5781 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5782 const0_rtx);
5783 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5784 }
5785 else
5786 {
5787 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5788 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5789 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5790 }
5791 }
5792 }
5793 else
5794 {
5795 /* Scalar or complex int. */
5796 reg = gen_rtx_REG (mode, regno);
5797 }
5798
5799 return reg;
5800 }
5801
5802 /* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
5803 For an arg passed partly in registers and partly in memory,
5804 this is the number of registers used.
5805 For args passed entirely in registers or entirely in memory, zero.
5806
5807 Any arg that starts in the first 6 regs but won't entirely fit in them
5808 needs partial registers on v8. On v9, structures with integer
5809 values in arg slots 5 and 6 will be passed in %o5 and SP+176, and complex fp
5810 values that begin in the last fp reg [where "last fp reg" varies with the
5811 mode] will be split between that reg and memory. */
5812
5813 int
5814 function_arg_partial_nregs (const struct sparc_args *cum,
5815 enum machine_mode mode, tree type, int named)
5816 {
5817 int slotno, regno, padding;
5818
5819 /* We pass 0 for incoming_p here; it doesn't matter. */
5820 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5821
5822 if (slotno == -1)
5823 return 0;
5824
5825 if (TARGET_ARCH32)
5826 {
5827 if ((slotno + (mode == BLKmode
5828 ? ROUND_ADVANCE (int_size_in_bytes (type))
5829 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5830 > SPARC_INT_ARG_MAX)
5831 return SPARC_INT_ARG_MAX - slotno;
5832 }
5833 else
5834 {
5835 /* We are guaranteed by pass_by_reference that the size of the
5836 argument is not greater than 16 bytes, so we only need to
5837 return 1 if the argument is partially passed in registers. */
5838
5839 if (type && AGGREGATE_TYPE_P (type))
5840 {
5841 int size = int_size_in_bytes (type);
5842
5843 if (size > UNITS_PER_WORD
5844 && slotno == SPARC_INT_ARG_MAX - 1)
5845 return 1;
5846 }
5847 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5848 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5849 && ! (TARGET_FPU && named)))
5850 {
5851 /* The complex types are passed as packed types. */
5852 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5853 && slotno == SPARC_INT_ARG_MAX - 1)
5854 return 1;
5855 }
5856 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5857 {
5858 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5859 > SPARC_FP_ARG_MAX)
5860 return 1;
5861 }
5862 }
5863
5864 return 0;
5865 }
5866
5867 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5868 Specify whether to pass the argument by reference. */
5869
5870 static bool
5871 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5872 enum machine_mode mode, tree type,
5873 bool named ATTRIBUTE_UNUSED)
5874 {
5875 if (TARGET_ARCH32)
5876 {
5877 /* Original SPARC 32-bit ABI says that structures, unions,
5878 and quad-precision floats are passed by reference. For Pascal,
5879 also pass arrays by reference. All other base types are passed
5880 in registers.
5881
5882 Extended ABI (as implemented by the Sun compiler) says that all
5883 complex floats are passed by reference. Pass complex integers
5884 in registers up to 8 bytes. More generally, enforce the 2-word
5885 cap for passing arguments in registers.
5886
5887 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5888 integers are passed like floats of the same size, that is in
5889 registers up to 8 bytes. Pass all vector floats by reference
5890 like structures and unions. */
5891 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5892 || mode == SCmode
5893 /* Catch CDImode, TFmode, DCmode and TCmode. */
5894 || GET_MODE_SIZE (mode) > 8
5895 || (type
5896 && TREE_CODE (type) == VECTOR_TYPE
5897 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5898 }
5899 else
5900 {
5901 /* Original SPARC 64-bit ABI says that structures and unions
5902 up to 16 bytes in size are passed in registers, as well as
5903 all other base types. For Pascal, pass arrays by reference.
5904
5905 Extended ABI (as implemented by the Sun compiler) says that
5906 complex floats are passed in registers up to 16 bytes. Pass
5907 all complex integers in registers up to 16 bytes. More generally,
5908 enforce the 2-word cap for passing arguments in registers.
5909
5910 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5911 integers are passed like floats of the same size, that is in
5912 registers (up to 16 bytes). Pass all vector floats like structures
5913 and unions. */
5914 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5915 || (type
5916 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5917 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5918 /* Catch CTImode and TCmode. */
5919 || GET_MODE_SIZE (mode) > 16);
5920 }
5921 }
5922
5923 /* Handle the FUNCTION_ARG_ADVANCE macro.
5924 Update the data in CUM to advance over an argument
5925 of mode MODE and data type TYPE.
5926 TYPE is null for libcalls where that information may not be available. */
5927
5928 void
5929 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5930 tree type, int named)
5931 {
5932 int slotno, regno, padding;
5933
5934 /* We pass 0 for incoming_p here; it doesn't matter. */
5935 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5936
5937 /* If register required leading padding, add it. */
5938 if (slotno != -1)
5939 cum->words += padding;
5940
5941 if (TARGET_ARCH32)
5942 {
5943 cum->words += (mode != BLKmode
5944 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5945 : ROUND_ADVANCE (int_size_in_bytes (type)));
5946 }
5947 else
5948 {
5949 if (type && AGGREGATE_TYPE_P (type))
5950 {
5951 int size = int_size_in_bytes (type);
5952
5953 if (size <= 8)
5954 ++cum->words;
5955 else if (size <= 16)
5956 cum->words += 2;
5957 else /* passed by reference */
5958 ++cum->words;
5959 }
5960 else
5961 {
5962 cum->words += (mode != BLKmode
5963 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5964 : ROUND_ADVANCE (int_size_in_bytes (type)));
5965 }
5966 }
5967 }
5968
5969 /* Handle the FUNCTION_ARG_PADDING macro.
5970 For the 64-bit ABI, structs are always stored left-justified in their
5971 argument slot. */
5972
5973 enum direction
5974 function_arg_padding (enum machine_mode mode, tree type)
5975 {
5976 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5977 return upward;
5978
5979 /* Fall back to the default. */
5980 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5981 }
5982
5983 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5984 Specify whether to return the return value in memory. */
5985
5986 static bool
5987 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5988 {
5989 if (TARGET_ARCH32)
5990 /* Original SPARC 32-bit ABI says that structures, unions,
5991 and quad-precision floats are returned in memory. All other
5992 base types are returned in registers.
5993
5994 Extended ABI (as implemented by the Sun compiler) says that
5995 all complex floats are returned in registers (8 FP registers
5996 at most for '_Complex long double'). Return all complex integers
5997 in registers (4 at most for '_Complex long long').
5998
5999 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6000 integers are returned like floats of the same size, that is in
6001 registers up to 8 bytes and in memory otherwise. Return all
6002 vector floats in memory like structures and unions; note that
6003 they always have BLKmode like the latter. */
6004 return (TYPE_MODE (type) == BLKmode
6005 || TYPE_MODE (type) == TFmode
6006 || (TREE_CODE (type) == VECTOR_TYPE
6007 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6008 else
6009 /* Original SPARC 64-bit ABI says that structures and unions
6010 up to 32 bytes in size are returned in registers, as well as
6011 all other base types.
6012
6013 Extended ABI (as implemented by the Sun compiler) says that all
6014 complex floats are returned in registers (8 FP registers at most
6015 for '_Complex long double'). Return all complex integers in
6016 registers (4 at most for '_Complex TItype').
6017
6018 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6019 integers are returned like floats of the same size, that is in
6020 registers. Return all vector floats like structures and unions;
6021 note that they always have BLKmode like the latter. */
6022 return ((TYPE_MODE (type) == BLKmode
6023 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
6024 }
6025
6026 /* Handle the TARGET_STRUCT_VALUE target hook.
6027 Return where to find the structure return value address. */
6028
6029 static rtx
6030 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
6031 {
6032 if (TARGET_ARCH64)
6033 return 0;
6034 else
6035 {
6036 if (incoming)
6037 return gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
6038 STRUCT_VALUE_OFFSET));
6039 else
6040 return gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
6041 STRUCT_VALUE_OFFSET));
6042 }
6043 }
6044
6045 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
6046 For v9, function return values are subject to the same rules as arguments,
6047 except that up to 32 bytes may be returned in registers. */
6048
6049 rtx
6050 function_value (tree type, enum machine_mode mode, int incoming_p)
6051 {
6052 /* Beware that the two values are swapped here wrt function_arg. */
6053 int regbase = (incoming_p
6054 ? SPARC_OUTGOING_INT_ARG_FIRST
6055 : SPARC_INCOMING_INT_ARG_FIRST);
6056 enum mode_class mclass = GET_MODE_CLASS (mode);
6057 int regno;
6058
6059 if (type && TREE_CODE (type) == VECTOR_TYPE)
6060 {
6061 /* Vector types deserve special treatment because they are
6062 polymorphic wrt their mode, depending upon whether VIS
6063 instructions are enabled. */
6064 HOST_WIDE_INT size = int_size_in_bytes (type);
6065
6066 if ((TARGET_ARCH32 && size > 8) || (TARGET_ARCH64 && size > 32))
6067 abort (); /* shouldn't get here */
6068
6069 if (mode == BLKmode)
6070 return function_arg_vector_value (size,
6071 TYPE_MODE (TREE_TYPE (type)),
6072 SPARC_FP_ARG_FIRST);
6073 else
6074 mclass = MODE_FLOAT;
6075 }
6076 else if (type && TARGET_ARCH64)
6077 {
6078 if (TREE_CODE (type) == RECORD_TYPE)
6079 {
6080 /* Structures up to 32 bytes in size are passed in registers,
6081 promoted to fp registers where possible. */
6082
6083 if (int_size_in_bytes (type) > 32)
6084 abort (); /* shouldn't get here */
6085
6086 return function_arg_record_value (type, mode, 0, 1, regbase);
6087 }
6088 else if (TREE_CODE (type) == UNION_TYPE)
6089 {
6090 HOST_WIDE_INT size = int_size_in_bytes (type);
6091
6092 if (size > 32)
6093 abort (); /* shouldn't get here */
6094
6095 return function_arg_union_value (size, mode, regbase);
6096 }
6097 else if (AGGREGATE_TYPE_P (type))
6098 {
6099 /* All other aggregate types are passed in an integer register
6100 in a mode corresponding to the size of the type. */
6101 HOST_WIDE_INT bytes = int_size_in_bytes (type);
6102
6103 if (bytes > 32)
6104 abort (); /* shouldn't get here */
6105
6106 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
6107
6108 /* ??? We probably should have made the same ABI change in
6109 3.4.0 as the one we made for unions. The latter was
6110 required by the SCD though, while the former is not
6111 specified, so we favored compatibility and efficiency.
6112
6113 Now we're stuck for aggregates larger than 16 bytes,
6114 because OImode vanished in the meantime. Let's not
6115 try to be unduly clever, and simply follow the ABI
6116 for unions in that case. */
6117 if (mode == BLKmode)
6118 return function_arg_union_value (bytes, mode, regbase);
6119 else
6120 mclass = MODE_INT;
6121 }
6122 else if (mclass == MODE_INT
6123 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6124 mode = word_mode;
6125 }
6126
6127 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6128 && TARGET_FPU)
6129 regno = SPARC_FP_ARG_FIRST;
6130 else
6131 regno = regbase;
6132
6133 return gen_rtx_REG (mode, regno);
6134 }
6135
6136 /* Do what is necessary for `va_start'. We look at the current function
6137 to determine if stdarg or varargs is used and return the address of
6138 the first unnamed parameter. */
6139
6140 static rtx
6141 sparc_builtin_saveregs (void)
6142 {
6143 int first_reg = current_function_args_info.words;
6144 rtx address;
6145 int regno;
6146
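   /* Dump the remaining (unnamed) incoming integer argument registers
      into their stack slots so that va_arg can find them there.  */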
6147 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6148 emit_move_insn (gen_rtx_MEM (word_mode,
6149 gen_rtx_PLUS (Pmode,
6150 frame_pointer_rtx,
6151 GEN_INT (FIRST_PARM_OFFSET (0)
6152 + (UNITS_PER_WORD
6153 * regno)))),
6154 gen_rtx_REG (word_mode,
6155 SPARC_INCOMING_INT_ARG_FIRST + regno));
6156
6157 address = gen_rtx_PLUS (Pmode,
6158 frame_pointer_rtx,
6159 GEN_INT (FIRST_PARM_OFFSET (0)
6160 + UNITS_PER_WORD * first_reg));
6161
6162 return address;
6163 }
6164
6165 /* Implement `va_start' for stdarg. */
6166
6167 void
6168 sparc_va_start (tree valist, rtx nextarg)
6169 {
6170 nextarg = expand_builtin_saveregs ();
6171 std_expand_builtin_va_start (valist, nextarg);
6172 }
6173
6174 /* Implement `va_arg' for stdarg. */
6175
6176 static tree
6177 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
6178 {
6179 HOST_WIDE_INT size, rsize, align;
6180 tree addr, incr;
6181 bool indirect;
6182 tree ptrtype = build_pointer_type (type);
6183
6184 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6185 {
6186 indirect = true;
6187 size = rsize = UNITS_PER_WORD;
6188 align = 0;
6189 }
6190 else
6191 {
6192 indirect = false;
6193 size = int_size_in_bytes (type);
6194 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6195 align = 0;
6196
6197 if (TARGET_ARCH64)
6198 {
6199 /* For SPARC64, objects requiring 16-byte alignment get it. */
6200 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6201 align = 2 * UNITS_PER_WORD;
6202
6203 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6204 are left-justified in their slots. */
6205 if (AGGREGATE_TYPE_P (type))
6206 {
6207 if (size == 0)
6208 size = rsize = UNITS_PER_WORD;
6209 else
6210 size = rsize;
6211 }
6212 }
6213 }
6214
6215 incr = valist;
6216 if (align)
6217 {
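      /* Round the address up to a multiple of ALIGN:
         incr = (incr + align - 1) & -align.  */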
6218 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
6219 ssize_int (align - 1)));
6220 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
6221 ssize_int (-align)));
6222 }
6223
6224 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6225 addr = incr;
6226
6227 if (BYTES_BIG_ENDIAN && size < rsize)
6228 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
6229 ssize_int (rsize - size)));
6230
6231 if (indirect)
6232 {
6233 addr = fold_convert (build_pointer_type (ptrtype), addr);
6234 addr = build_va_arg_indirect_ref (addr);
6235 }
6236 /* If the address isn't aligned properly for the type,
6237 we may need to copy to a temporary.
6238 FIXME: This is inefficient. Usually we can do this
6239 in registers. */
6240 else if (align == 0
6241 && TYPE_ALIGN (type) > BITS_PER_WORD)
6242 {
6243 tree tmp = create_tmp_var (type, "va_arg_tmp");
6244 tree dest_addr = build_fold_addr_expr (tmp);
6245
6246 tree copy = build_function_call_expr
6247 (implicit_built_in_decls[BUILT_IN_MEMCPY],
6248 tree_cons (NULL_TREE, dest_addr,
6249 tree_cons (NULL_TREE, addr,
6250 tree_cons (NULL_TREE, size_int (rsize),
6251 NULL_TREE))));
6252
6253 gimplify_and_add (copy, pre_p);
6254 addr = dest_addr;
6255 }
6256 else
6257 addr = fold_convert (ptrtype, addr);
6258
6259 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
6260 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
6261 gimplify_and_add (incr, post_p);
6262
6263 return build_va_arg_indirect_ref (addr);
6264 }
6265 \f
6266 /* Return the string to output an unconditional branch to LABEL, which is
6267 the operand number of the label.
6268
6269 DEST is the destination insn (i.e. the label), INSN is the source. */
6270
6271 const char *
6272 output_ubranch (rtx dest, int label, rtx insn)
6273 {
6274 static char string[64];
6275 bool v9_form = false;
6276 char *p;
6277
6278 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6279 {
6280 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6281 - INSN_ADDRESSES (INSN_UID (insn)));
6282 /* Leave some instructions for "slop". */
6283 if (delta >= -260000 && delta < 260000)
6284 v9_form = true;
6285 }
6286
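  /* The result is an assembler template such as "b%*\t%l0%(" or, for the
     V9 form, "ba%*,pt\t%%xcc, %l0%(".  */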
6287 if (v9_form)
6288 strcpy (string, "ba%*,pt\t%%xcc, ");
6289 else
6290 strcpy (string, "b%*\t");
6291
6292 p = strchr (string, '\0');
6293 *p++ = '%';
6294 *p++ = 'l';
6295 *p++ = '0' + label;
6296 *p++ = '%';
6297 *p++ = '(';
6298 *p = '\0';
6299
6300 return string;
6301 }
6302
6303 /* Return the string to output a conditional branch to LABEL, which is
6304 the operand number of the label. OP is the conditional expression.
6305 XEXP (OP, 0) is assumed to be a condition code register (integer or
6306 floating point) and its mode specifies what kind of comparison we made.
6307
6308 DEST is the destination insn (i.e. the label), INSN is the source.
6309
6310 REVERSED is nonzero if we should reverse the sense of the comparison.
6311
6312 ANNUL is nonzero if we should generate an annulling branch. */
6313
6314 const char *
6315 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6316 rtx insn)
6317 {
6318 static char string[64];
6319 enum rtx_code code = GET_CODE (op);
6320 rtx cc_reg = XEXP (op, 0);
6321 enum machine_mode mode = GET_MODE (cc_reg);
6322 const char *labelno, *branch;
6323 int spaces = 8, far;
6324 char *p;
6325
6326 /* v9 branches are limited to +-1MB. If it is too far away,
6327 change
6328
6329 bne,pt %xcc, .LC30
6330
6331 to
6332
6333 be,pn %xcc, .+12
6334 nop
6335 ba .LC30
6336
6337 and
6338
6339 fbne,a,pn %fcc2, .LC29
6340
6341 to
6342
6343 fbe,pt %fcc2, .+16
6344 nop
6345 ba .LC29 */
6346
6347 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6348 if (reversed ^ far)
6349 {
6350 /* Reversal of FP compares requires care -- an ordered compare
6351 becomes an unordered compare and vice versa. */
6352 if (mode == CCFPmode || mode == CCFPEmode)
6353 code = reverse_condition_maybe_unordered (code);
6354 else
6355 code = reverse_condition (code);
6356 }
6357
6358 /* Start by writing the branch condition. */
6359 if (mode == CCFPmode || mode == CCFPEmode)
6360 {
6361 switch (code)
6362 {
6363 case NE:
6364 branch = "fbne";
6365 break;
6366 case EQ:
6367 branch = "fbe";
6368 break;
6369 case GE:
6370 branch = "fbge";
6371 break;
6372 case GT:
6373 branch = "fbg";
6374 break;
6375 case LE:
6376 branch = "fble";
6377 break;
6378 case LT:
6379 branch = "fbl";
6380 break;
6381 case UNORDERED:
6382 branch = "fbu";
6383 break;
6384 case ORDERED:
6385 branch = "fbo";
6386 break;
6387 case UNGT:
6388 branch = "fbug";
6389 break;
6390 case UNLT:
6391 branch = "fbul";
6392 break;
6393 case UNEQ:
6394 branch = "fbue";
6395 break;
6396 case UNGE:
6397 branch = "fbuge";
6398 break;
6399 case UNLE:
6400 branch = "fbule";
6401 break;
6402 case LTGT:
6403 branch = "fblg";
6404 break;
6405
6406 default:
6407 abort ();
6408 }
6409
6410 /* ??? !v9: FP branches cannot be preceded by another floating point
6411 insn. Because there is currently no concept of pre-delay slots,
6412 we can fix this only by always emitting a nop before a floating
6413 point branch. */
6414
6415 string[0] = '\0';
6416 if (! TARGET_V9)
6417 strcpy (string, "nop\n\t");
6418 strcat (string, branch);
6419 }
6420 else
6421 {
6422 switch (code)
6423 {
6424 case NE:
6425 branch = "bne";
6426 break;
6427 case EQ:
6428 branch = "be";
6429 break;
6430 case GE:
6431 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6432 branch = "bpos";
6433 else
6434 branch = "bge";
6435 break;
6436 case GT:
6437 branch = "bg";
6438 break;
6439 case LE:
6440 branch = "ble";
6441 break;
6442 case LT:
6443 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6444 branch = "bneg";
6445 else
6446 branch = "bl";
6447 break;
6448 case GEU:
6449 branch = "bgeu";
6450 break;
6451 case GTU:
6452 branch = "bgu";
6453 break;
6454 case LEU:
6455 branch = "bleu";
6456 break;
6457 case LTU:
6458 branch = "blu";
6459 break;
6460
6461 default:
6462 abort ();
6463 }
6464 strcpy (string, branch);
6465 }
6466 spaces -= strlen (branch);
6467 p = strchr (string, '\0');
6468
6469 /* Now add the annulling, the label, and a possible nop. */
6470 if (annul && ! far)
6471 {
6472 strcpy (p, ",a");
6473 p += 2;
6474 spaces -= 2;
6475 }
6476
6477 if (TARGET_V9)
6478 {
6479 rtx note;
6480 int v8 = 0;
6481
6482 if (! far && insn && INSN_ADDRESSES_SET_P ())
6483 {
6484 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6485 - INSN_ADDRESSES (INSN_UID (insn)));
6486 /* Leave some instructions for "slop". */
6487 if (delta < -260000 || delta >= 260000)
6488 v8 = 1;
6489 }
6490
6491 if (mode == CCFPmode || mode == CCFPEmode)
6492 {
6493 static char v9_fcc_labelno[] = "%%fccX, ";
6494 /* Set the char indicating the number of the fcc reg to use. */
6495 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6496 labelno = v9_fcc_labelno;
6497 if (v8)
6498 {
6499 if (REGNO (cc_reg) == SPARC_FCC_REG)
6500 labelno = "";
6501 else
6502 abort ();
6503 }
6504 }
6505 else if (mode == CCXmode || mode == CCX_NOOVmode)
6506 {
6507 labelno = "%%xcc, ";
6508 if (v8)
6509 abort ();
6510 }
6511 else
6512 {
6513 labelno = "%%icc, ";
6514 if (v8)
6515 labelno = "";
6516 }
6517
6518 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6519 {
6520 strcpy (p,
6521 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6522 ? ",pt" : ",pn");
6523 p += 3;
6524 spaces -= 3;
6525 }
6526 }
6527 else
6528 labelno = "";
6529
6530 if (spaces > 0)
6531 *p++ = '\t';
6532 else
6533 *p++ = ' ';
6534 strcpy (p, labelno);
6535 p = strchr (p, '\0');
6536 if (far)
6537 {
6538 strcpy (p, ".+12\n\t nop\n\tb\t");
6539 /* Skip the next insn if requested or
6540 if we know that it will be a nop. */
6541 if (annul || ! final_sequence)
6542 p[3] = '6';
6543 p += 14;
6544 }
6545 *p++ = '%';
6546 *p++ = 'l';
6547 *p++ = label + '0';
6548 *p++ = '%';
6549 *p++ = '#';
6550 *p = '\0';
6551
6552 return string;
6553 }
6554
6555 /* Emit a library call comparison between floating point X and Y.
6556 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6557 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6558 values as arguments instead of the TFmode registers themselves,
6559 which is why we cannot call emit_float_lib_cmp. */
6560 void
6561 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6562 {
6563 const char *qpfunc;
6564 rtx slot0, slot1, result, tem, tem2;
6565 enum machine_mode mode;
6566
6567 switch (comparison)
6568 {
6569 case EQ:
6570 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6571 break;
6572
6573 case NE:
6574 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6575 break;
6576
6577 case GT:
6578 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6579 break;
6580
6581 case GE:
6582 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6583 break;
6584
6585 case LT:
6586 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6587 break;
6588
6589 case LE:
6590 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6591 break;
6592
6593 case ORDERED:
6594 case UNORDERED:
6595 case UNGT:
6596 case UNLT:
6597 case UNEQ:
6598 case UNGE:
6599 case UNLE:
6600 case LTGT:
6601 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6602 break;
6603
6604 default:
6605 abort ();
6606 break;
6607 }
6608
6609 if (TARGET_ARCH64)
6610 {
6611 if (GET_CODE (x) != MEM)
6612 {
6613 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6614 emit_insn (gen_rtx_SET (VOIDmode, slot0, x));
6615 }
6616 else
6617 slot0 = x;
6618
6619 if (GET_CODE (y) != MEM)
6620 {
6621 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6622 emit_insn (gen_rtx_SET (VOIDmode, slot1, y));
6623 }
6624 else
6625 slot1 = y;
6626
6627 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6628 DImode, 2,
6629 XEXP (slot0, 0), Pmode,
6630 XEXP (slot1, 0), Pmode);
6631
6632 mode = DImode;
6633 }
6634 else
6635 {
6636 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6637 SImode, 2,
6638 x, TFmode, y, TFmode);
6639
6640 mode = SImode;
6641 }
6642
6643
6644 /* Immediately move the result of the libcall into a pseudo
6645 register so reload doesn't clobber the value if it needs
6646 the return register for a spill reg. */
6647 result = gen_reg_rtx (mode);
6648 emit_move_insn (result, hard_libcall_value (mode));
6649
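  /* Decode the libcall's result.  _Q_cmp/_Qp_cmp return the usual SPARC
     fcc encoding -- 0 = equal, 1 = less, 2 = greater, 3 = unordered --
     and the tests below are written against that encoding.  */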
6650 switch (comparison)
6651 {
6652 default:
6653 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6654 break;
6655 case ORDERED:
6656 case UNORDERED:
6657 emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6658 NULL_RTX, mode, 0);
6659 break;
6660 case UNGT:
6661 case UNGE:
6662 emit_cmp_insn (result, const1_rtx,
6663 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6664 break;
6665 case UNLE:
6666 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6667 break;
6668 case UNLT:
6669 tem = gen_reg_rtx (mode);
6670 if (TARGET_ARCH32)
6671 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6672 else
6673 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6674 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6675 break;
6676 case UNEQ:
6677 case LTGT:
6678 tem = gen_reg_rtx (mode);
6679 if (TARGET_ARCH32)
6680 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6681 else
6682 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6683 tem2 = gen_reg_rtx (mode);
6684 if (TARGET_ARCH32)
6685 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6686 else
6687 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6688 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6689 NULL_RTX, mode, 0);
6690 break;
6691 }
6692 }
6693
6694 /* Generate an unsigned DImode to FP conversion. This is the same code
6695 optabs would emit if we didn't have TFmode patterns. */
6696
6697 void
6698 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6699 {
6700 rtx neglab, donelab, i0, i1, f0, in, out;
6701
6702 out = operands[0];
6703 in = force_reg (DImode, operands[1]);
6704 neglab = gen_label_rtx ();
6705 donelab = gen_label_rtx ();
6706 i0 = gen_reg_rtx (DImode);
6707 i1 = gen_reg_rtx (DImode);
6708 f0 = gen_reg_rtx (mode);
6709
6710 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6711
6712 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6713 emit_jump_insn (gen_jump (donelab));
6714 emit_barrier ();
6715
6716 emit_label (neglab);
6717
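  /* IN is >= 2^63, too big for a signed DImode conversion.  Halve it,
     keeping the low bit sticky so rounding stays correct, convert,
     then double the result: out = (fp) ((in >> 1) | (in & 1)) * 2.  */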
6718 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6719 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6720 emit_insn (gen_iordi3 (i0, i0, i1));
6721 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6722 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6723
6724 emit_label (donelab);
6725 }
6726
6727 /* Generate an FP to unsigned DImode conversion. This is the same code
6728 optabs would emit if we didn't have TFmode patterns. */
6729
6730 void
6731 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6732 {
6733 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6734
6735 out = operands[0];
6736 in = force_reg (mode, operands[1]);
6737 neglab = gen_label_rtx ();
6738 donelab = gen_label_rtx ();
6739 i0 = gen_reg_rtx (DImode);
6740 i1 = gen_reg_rtx (DImode);
6741 limit = gen_reg_rtx (mode);
6742 f0 = gen_reg_rtx (mode);
6743
6744 emit_move_insn (limit,
6745 CONST_DOUBLE_FROM_REAL_VALUE (
6746 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6747 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6748
6749 emit_insn (gen_rtx_SET (VOIDmode,
6750 out,
6751 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6752 emit_jump_insn (gen_jump (donelab));
6753 emit_barrier ();
6754
6755 emit_label (neglab);
6756
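  /* IN is >= 2^63, beyond the signed DImode range.  Subtract 2^63,
     do the signed conversion, then flip the sign bit of the integer
     result to add 2^63 back.  */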
6757 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6758 emit_insn (gen_rtx_SET (VOIDmode,
6759 i0,
6760 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6761 emit_insn (gen_movdi (i1, const1_rtx));
6762 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6763 emit_insn (gen_xordi3 (out, i0, i1));
6764
6765 emit_label (donelab);
6766 }
6767
6768 /* Return the string to output a conditional branch to LABEL, testing
6769 register REG. LABEL is the operand number of the label; REG is the
6770 operand number of the reg. OP is the conditional expression. The mode
6771 of REG says what kind of comparison we made.
6772
6773 DEST is the destination insn (i.e. the label), INSN is the source.
6774
6775 REVERSED is nonzero if we should reverse the sense of the comparison.
6776
6777 ANNUL is nonzero if we should generate an annulling branch. */
6778
6779 const char *
6780 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6781 int annul, rtx insn)
6782 {
6783 static char string[64];
6784 enum rtx_code code = GET_CODE (op);
6785 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6786 rtx note;
6787 int far;
6788 char *p;
6789
6790 /* Branches on a register are limited to +-128KB. If one is too far away,
6791 change
6792
6793 brnz,pt %g1, .LC30
6794
6795 to
6796
6797 brz,pn %g1, .+12
6798 nop
6799 ba,pt %xcc, .LC30
6800
6801 and
6802
6803 brgez,a,pn %o1, .LC29
6804
6805 to
6806
6807 brlz,pt %o1, .+16
6808 nop
6809 ba,pt %xcc, .LC29 */
6810
6811 far = get_attr_length (insn) >= 3;
6812
6813 /* These are integer branches on register values, so we can always just reverse the code. */
6814 if (reversed ^ far)
6815 code = reverse_condition (code);
6816
6817 /* Only 64 bit versions of these instructions exist. */
6818 if (mode != DImode)
6819 abort ();
6820
6821 /* Start by writing the branch condition. */
6822
6823 switch (code)
6824 {
6825 case NE:
6826 strcpy (string, "brnz");
6827 break;
6828
6829 case EQ:
6830 strcpy (string, "brz");
6831 break;
6832
6833 case GE:
6834 strcpy (string, "brgez");
6835 break;
6836
6837 case LT:
6838 strcpy (string, "brlz");
6839 break;
6840
6841 case LE:
6842 strcpy (string, "brlez");
6843 break;
6844
6845 case GT:
6846 strcpy (string, "brgz");
6847 break;
6848
6849 default:
6850 abort ();
6851 }
6852
6853 p = strchr (string, '\0');
6854
6855 /* Now add the annulling, reg, label, and nop. */
6856 if (annul && ! far)
6857 {
6858 strcpy (p, ",a");
6859 p += 2;
6860 }
6861
6862 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6863 {
6864 strcpy (p,
6865 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6866 ? ",pt" : ",pn");
6867 p += 3;
6868 }
6869
6870 *p = p < string + 8 ? '\t' : ' ';
6871 p++;
6872 *p++ = '%';
6873 *p++ = '0' + reg;
6874 *p++ = ',';
6875 *p++ = ' ';
6876 if (far)
6877 {
6878 int veryfar = 1, delta;
6879
6880 if (INSN_ADDRESSES_SET_P ())
6881 {
6882 delta = (INSN_ADDRESSES (INSN_UID (dest))
6883 - INSN_ADDRESSES (INSN_UID (insn)));
6884 /* Leave some instructions for "slop". */
6885 if (delta >= -260000 && delta < 260000)
6886 veryfar = 0;
6887 }
6888
6889 strcpy (p, ".+12\n\t nop\n\t");
6890 /* Skip the next insn if requested or
6891 if we know that it will be a nop. */
6892 if (annul || ! final_sequence)
6893 p[3] = '6';
6894 p += 12;
6895 if (veryfar)
6896 {
6897 strcpy (p, "b\t");
6898 p += 2;
6899 }
6900 else
6901 {
6902 strcpy (p, "ba,pt\t%%xcc, ");
6903 p += 13;
6904 }
6905 }
6906 *p++ = '%';
6907 *p++ = 'l';
6908 *p++ = '0' + label;
6909 *p++ = '%';
6910 *p++ = '#';
6911 *p = '\0';
6912
6913 return string;
6914 }
6915
6916 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6917 Such instructions cannot be used in the delay slot of a return insn on v9.
6918 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts. */
6920
6921 static int
6922 epilogue_renumber (register rtx *where, int test)
6923 {
6924 register const char *fmt;
6925 register int i;
6926 register enum rtx_code code;
6927
6928 if (*where == 0)
6929 return 0;
6930
6931 code = GET_CODE (*where);
6932
6933 switch (code)
6934 {
6935 case REG:
6936 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6937 return 1;
6938 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6939 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
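      /* Fall through.  */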
6940 case SCRATCH:
6941 case CC0:
6942 case PC:
6943 case CONST_INT:
6944 case CONST_DOUBLE:
6945 return 0;
6946
6947 /* Do not replace the frame pointer with the stack pointer because
6948 it can cause the delayed instruction to load below the stack.
6949 This occurs when instructions like:
6950
6951 (set (reg/i:SI 24 %i0)
6952 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6953 (const_int -20 [0xffffffec])) 0))
6954
6955 are in the return delay slot. */
6956 case PLUS:
6957 if (GET_CODE (XEXP (*where, 0)) == REG
6958 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6959 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6960 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6961 return 1;
6962 break;
6963
6964 case MEM:
6965 if (SPARC_STACK_BIAS
6966 && GET_CODE (XEXP (*where, 0)) == REG
6967 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6968 return 1;
6969 break;
6970
6971 default:
6972 break;
6973 }
6974
6975 fmt = GET_RTX_FORMAT (code);
6976
6977 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6978 {
6979 if (fmt[i] == 'E')
6980 {
6981 register int j;
6982 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6983 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6984 return 1;
6985 }
6986 else if (fmt[i] == 'e'
6987 && epilogue_renumber (&(XEXP (*where, i)), test))
6988 return 1;
6989 }
6990 return 0;
6991 }
6992 \f
6993 /* Leaf functions and non-leaf functions have different needs. */
6994
6995 static const int
6996 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6997
6998 static const int
6999 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7000
7001 static const int *const reg_alloc_orders[] = {
7002 reg_leaf_alloc_order,
7003 reg_nonleaf_alloc_order};
7004
7005 void
7006 order_regs_for_local_alloc (void)
7007 {
7008 static int last_order_nonleaf = 1;
7009
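  /* Register 15 is %o7, which is clobbered by call instructions, so its
     liveness distinguishes leaf from non-leaf functions here.  */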
7010 if (regs_ever_live[15] != last_order_nonleaf)
7011 {
7012 last_order_nonleaf = !last_order_nonleaf;
7013 memcpy ((char *) reg_alloc_order,
7014 (const char *) reg_alloc_orders[last_order_nonleaf],
7015 FIRST_PSEUDO_REGISTER * sizeof (int));
7016 }
7017 }
7018 \f
7019 /* Return 1 if REG and MEM are legitimate enough to allow the various
7020 mem<-->reg splits to be run. */
7021
7022 int
7023 sparc_splitdi_legitimate (rtx reg, rtx mem)
7024 {
7025 /* Punt if we are here by mistake. */
7026 if (! reload_completed)
7027 abort ();
7028
7029 /* We must have an offsettable memory reference. */
7030 if (! offsettable_memref_p (mem))
7031 return 0;
7032
7033 /* If we have legitimate args for ldd/std, we do not want
7034 the split to happen. */
7035 if ((REGNO (reg) % 2) == 0
7036 && mem_min_alignment (mem, 8))
7037 return 0;
7038
7039 /* Success. */
7040 return 1;
7041 }
7042
7043 /* Return 1 if x and y are some kind of REG and they refer to
7044 different hard registers. This test is guaranteed to be
7045 run after reload. */
7046
7047 int
7048 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7049 {
7050 if (GET_CODE (x) != REG)
7051 return 0;
7052 if (GET_CODE (y) != REG)
7053 return 0;
7054 if (REGNO (x) == REGNO (y))
7055 return 0;
7056 return 1;
7057 }
7058
7059 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7060 This makes them candidates for using ldd and std insns.
7061
7062 Note reg1 and reg2 *must* be hard registers. */
7063
7064 int
7065 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7066 {
7067 /* We might have been passed a SUBREG. */
7068 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7069 return 0;
7070
7071 if (REGNO (reg1) % 2 != 0)
7072 return 0;
7073
7074 /* Integer ldd is deprecated in SPARC V9. */
7075 if (TARGET_V9 && REGNO (reg1) < 32)
7076 return 0;
7077
7078 return (REGNO (reg1) == REGNO (reg2) - 1);
7079 }
7080
7081 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7082 an ldd or std insn.
7083
7084 This can only happen when addr1 and addr2, the addresses in mem1
7085 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7086 addr1 must also be aligned on a 64-bit boundary.
7087
7088 Also, if dependent_reg_rtx is not null, it should not be used to
7089 compute the address for mem1, i.e. we cannot optimize a sequence
7090 like:
7091 ld [%o0], %o0
7092 ld [%o0 + 4], %o1
7093 to
7094 ldd [%o0], %o0
7095 nor:
7096 ld [%g3 + 4], %g3
7097 ld [%g3], %g2
7098 to
7099 ldd [%g3], %g2
7100
7101 But, note that the transformation from:
7102 ld [%g2 + 4], %g3
7103 ld [%g2], %g2
7104 to
7105 ldd [%g2], %g2
7106 is perfectly fine. Thus, the peephole2 patterns always pass us
7107 the destination register of the first load, never the second one.
7108
7109 For stores we don't have a similar problem, so dependent_reg_rtx is
7110 NULL_RTX. */
7111
7112 int
7113 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7114 {
7115 rtx addr1, addr2;
7116 unsigned int reg1;
7117 HOST_WIDE_INT offset1;
7118
7119 /* The mems cannot be volatile. */
7120 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7121 return 0;
7122
7123 /* MEM1 should be aligned on a 64-bit boundary. */
7124 if (MEM_ALIGN (mem1) < 64)
7125 return 0;
7126
7127 addr1 = XEXP (mem1, 0);
7128 addr2 = XEXP (mem2, 0);
7129
7130 /* Extract a register number and offset (if used) from the first addr. */
7131 if (GET_CODE (addr1) == PLUS)
7132 {
7133 /* If not a REG, return zero. */
7134 if (GET_CODE (XEXP (addr1, 0)) != REG)
7135 return 0;
7136 else
7137 {
7138 reg1 = REGNO (XEXP (addr1, 0));
7139 /* The offset must be constant! */
7140 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7141 return 0;
7142 offset1 = INTVAL (XEXP (addr1, 1));
7143 }
7144 }
7145 else if (GET_CODE (addr1) != REG)
7146 return 0;
7147 else
7148 {
7149 reg1 = REGNO (addr1);
7150 /* This was a simple (mem (reg)) expression. Offset is 0. */
7151 offset1 = 0;
7152 }
7153
7154 /* Make sure the second address is a (mem (plus (reg) (const_int))). */
7155 if (GET_CODE (addr2) != PLUS)
7156 return 0;
7157
7158 if (GET_CODE (XEXP (addr2, 0)) != REG
7159 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7160 return 0;
7161
7162 if (reg1 != REGNO (XEXP (addr2, 0)))
7163 return 0;
7164
7165 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7166 return 0;
7167
7168 /* The first offset must be evenly divisible by 8 to ensure the
7169 address is 64-bit aligned. */
7170 if (offset1 % 8 != 0)
7171 return 0;
7172
7173 /* The offset for the second addr must be 4 more than the first addr. */
7174 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7175 return 0;
7176
7177 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7178 instructions. */
7179 return 1;
7180 }
7181
7182 /* Return 1 if reg is a pseudo, or is the first register in
7183 a hard register pair. This makes it a candidate for use in
7184 ldd and std insns. */
7185
7186 int
7187 register_ok_for_ldd (rtx reg)
7188 {
7189 /* We might have been passed a SUBREG. */
7190 if (GET_CODE (reg) != REG)
7191 return 0;
7192
7193 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7194 return (REGNO (reg) % 2 == 0);
7195 else
7196 return 1;
7197 }
7198 \f
7199 /* Print operand X (an rtx) in assembler syntax to file FILE.
7200 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7201 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7202
7203 void
7204 print_operand (FILE *file, rtx x, int code)
7205 {
7206 switch (code)
7207 {
7208 case '#':
7209 /* Output an insn in a delay slot. */
7210 if (final_sequence)
7211 sparc_indent_opcode = 1;
7212 else
7213 fputs ("\n\t nop", file);
7214 return;
7215 case '*':
7216 /* Output an annul flag if there's nothing for the delay slot and we
7217 are optimizing. This is always used with '(' below.
7218 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7219 this is a dbx bug. So, we only do this when optimizing.
7220 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7221 Always emit a nop in case the next instruction is a branch. */
7222 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7223 fputs (",a", file);
7224 return;
7225 case '(':
7226 /* Output a 'nop' if there's nothing for the delay slot and we are
7227 not optimizing. This is always used with '*' above. */
7228 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7229 fputs ("\n\t nop", file);
7230 else if (final_sequence)
7231 sparc_indent_opcode = 1;
7232 return;
7233 case ')':
7234 /* Output the right displacement from the saved PC on function return.
7235 The caller may have placed an "unimp" insn immediately after the call
7236 so we have to account for it. This insn is used in the 32-bit ABI
7237 when calling a function that returns a non zero-sized structure. The
7238 64-bit ABI doesn't have it. Be careful to have this test be the same
7239 as that used on the call. */
7240 if (! TARGET_ARCH64
7241 && current_function_returns_struct
7242 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7243 == INTEGER_CST)
7244 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7245 fputs ("12", file);
7246 else
7247 fputc ('8', file);
7248 return;
7249 case '_':
7250 /* Output the Embedded Medium/Anywhere code model base register. */
7251 fputs (EMBMEDANY_BASE_REG, file);
7252 return;
7253 case '&':
7254 /* Print some local dynamic TLS name. */
7255 assemble_name (file, get_some_local_dynamic_name ());
7256 return;
7257
7258 case 'Y':
7259 /* Adjust the operand to take into account a RESTORE operation. */
7260 if (GET_CODE (x) == CONST_INT)
7261 break;
7262 else if (GET_CODE (x) != REG)
7263 output_operand_lossage ("invalid %%Y operand");
7264 else if (REGNO (x) < 8)
7265 fputs (reg_names[REGNO (x)], file);
7266 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7267 fputs (reg_names[REGNO (x)-16], file);
7268 else
7269 output_operand_lossage ("invalid %%Y operand");
7270 return;
7271 case 'L':
7272 /* Print out the low order register name of a register pair. */
7273 if (WORDS_BIG_ENDIAN)
7274 fputs (reg_names[REGNO (x)+1], file);
7275 else
7276 fputs (reg_names[REGNO (x)], file);
7277 return;
7278 case 'H':
7279 /* Print out the high order register name of a register pair. */
7280 if (WORDS_BIG_ENDIAN)
7281 fputs (reg_names[REGNO (x)], file);
7282 else
7283 fputs (reg_names[REGNO (x)+1], file);
7284 return;
7285 case 'R':
7286 /* Print out the second register name of a register pair or quad.
7287 I.e., R (%o0) => %o1. */
7288 fputs (reg_names[REGNO (x)+1], file);
7289 return;
7290 case 'S':
7291 /* Print out the third register name of a register quad.
7292 I.e., S (%o0) => %o2. */
7293 fputs (reg_names[REGNO (x)+2], file);
7294 return;
7295 case 'T':
7296 /* Print out the fourth register name of a register quad.
7297 I.e., T (%o0) => %o3. */
7298 fputs (reg_names[REGNO (x)+3], file);
7299 return;
7300 case 'x':
7301 /* Print a condition code register. */
7302 if (REGNO (x) == SPARC_ICC_REG)
7303 {
7304 /* We don't handle CC[X]_NOOVmode because they're not supposed
7305 to occur here. */
7306 if (GET_MODE (x) == CCmode)
7307 fputs ("%icc", file);
7308 else if (GET_MODE (x) == CCXmode)
7309 fputs ("%xcc", file);
7310 else
7311 abort ();
7312 }
7313 else
7314 /* %fccN register */
7315 fputs (reg_names[REGNO (x)], file);
7316 return;
7317 case 'm':
7318 /* Print the operand's address only. */
7319 output_address (XEXP (x, 0));
7320 return;
7321 case 'r':
7322 /* In this case we need a register. Use %g0 if the
7323 operand is const0_rtx. */
7324 if (x == const0_rtx
7325 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7326 {
7327 fputs ("%g0", file);
7328 return;
7329 }
7330 else
7331 break;
7332
7333 case 'A':
7334 switch (GET_CODE (x))
7335 {
7336 case IOR: fputs ("or", file); break;
7337 case AND: fputs ("and", file); break;
7338 case XOR: fputs ("xor", file); break;
7339 default: output_operand_lossage ("invalid %%A operand");
7340 }
7341 return;
7342
7343 case 'B':
7344 switch (GET_CODE (x))
7345 {
7346 case IOR: fputs ("orn", file); break;
7347 case AND: fputs ("andn", file); break;
7348 case XOR: fputs ("xnor", file); break;
7349 default: output_operand_lossage ("invalid %%B operand");
7350 }
7351 return;
7352
7353 /* These are used by the conditional move instructions. */
7354 case 'c' :
7355 case 'C':
7356 {
7357 enum rtx_code rc = GET_CODE (x);
7358
7359 if (code == 'c')
7360 {
7361 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7362 if (mode == CCFPmode || mode == CCFPEmode)
7363 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7364 else
7365 rc = reverse_condition (GET_CODE (x));
7366 }
7367 switch (rc)
7368 {
7369 case NE: fputs ("ne", file); break;
7370 case EQ: fputs ("e", file); break;
7371 case GE: fputs ("ge", file); break;
7372 case GT: fputs ("g", file); break;
7373 case LE: fputs ("le", file); break;
7374 case LT: fputs ("l", file); break;
7375 case GEU: fputs ("geu", file); break;
7376 case GTU: fputs ("gu", file); break;
7377 case LEU: fputs ("leu", file); break;
7378 case LTU: fputs ("lu", file); break;
7379 case LTGT: fputs ("lg", file); break;
7380 case UNORDERED: fputs ("u", file); break;
7381 case ORDERED: fputs ("o", file); break;
7382 case UNLT: fputs ("ul", file); break;
7383 case UNLE: fputs ("ule", file); break;
7384 case UNGT: fputs ("ug", file); break;
7385 case UNGE: fputs ("uge", file); break;
7386 case UNEQ: fputs ("ue", file); break;
7387 default: output_operand_lossage (code == 'c'
7388 ? "invalid %%c operand"
7389 : "invalid %%C operand");
7390 }
7391 return;
7392 }
7393
7394 /* These are used by the movr instruction pattern. */
7395 case 'd':
7396 case 'D':
7397 {
7398 enum rtx_code rc = (code == 'd'
7399 ? reverse_condition (GET_CODE (x))
7400 : GET_CODE (x));
7401 switch (rc)
7402 {
7403 case NE: fputs ("ne", file); break;
7404 case EQ: fputs ("e", file); break;
7405 case GE: fputs ("gez", file); break;
7406 case LT: fputs ("lz", file); break;
7407 case LE: fputs ("lez", file); break;
7408 case GT: fputs ("gz", file); break;
7409 default: output_operand_lossage (code == 'd'
7410 ? "invalid %%d operand"
7411 : "invalid %%D operand");
7412 }
7413 return;
7414 }
7415
7416 case 'b':
7417 {
7418 /* Print a sign-extended character. */
7419 int i = trunc_int_for_mode (INTVAL (x), QImode);
7420 fprintf (file, "%d", i);
7421 return;
7422 }
7423
7424 case 'f':
7425 /* Operand must be a MEM; write its address. */
7426 if (GET_CODE (x) != MEM)
7427 output_operand_lossage ("invalid %%f operand");
7428 output_address (XEXP (x, 0));
7429 return;
7430
7431 case 's':
7432 {
7433 /* Print a sign-extended 32-bit value. */
7434 HOST_WIDE_INT i;
7435 if (GET_CODE (x) == CONST_INT)
7436 i = INTVAL (x);
7437 else if (GET_CODE (x) == CONST_DOUBLE)
7438 i = CONST_DOUBLE_LOW (x);
7439 else
7440 {
7441 output_operand_lossage ("invalid %%s operand");
7442 return;
7443 }
7444 i = trunc_int_for_mode (i, SImode);
7445 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7446 return;
7447 }
7448
7449 case 0:
7450 /* Do nothing special. */
7451 break;
7452
7453 default:
7454 /* Undocumented flag. */
7455 output_operand_lossage ("invalid operand output code");
7456 }
7457
7458 if (GET_CODE (x) == REG)
7459 fputs (reg_names[REGNO (x)], file);
7460 else if (GET_CODE (x) == MEM)
7461 {
7462 fputc ('[', file);
7463 /* Poor Sun assembler doesn't understand absolute addressing. */
7464 if (CONSTANT_P (XEXP (x, 0)))
7465 fputs ("%g0+", file);
7466 output_address (XEXP (x, 0));
7467 fputc (']', file);
7468 }
7469 else if (GET_CODE (x) == HIGH)
7470 {
7471 fputs ("%hi(", file);
7472 output_addr_const (file, XEXP (x, 0));
7473 fputc (')', file);
7474 }
7475 else if (GET_CODE (x) == LO_SUM)
7476 {
7477 print_operand (file, XEXP (x, 0), 0);
7478 if (TARGET_CM_MEDMID)
7479 fputs ("+%l44(", file);
7480 else
7481 fputs ("+%lo(", file);
7482 output_addr_const (file, XEXP (x, 1));
7483 fputc (')', file);
7484 }
7485 else if (GET_CODE (x) == CONST_DOUBLE
7486 && (GET_MODE (x) == VOIDmode
7487 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7488 {
7489 if (CONST_DOUBLE_HIGH (x) == 0)
7490 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7491 else if (CONST_DOUBLE_HIGH (x) == -1
7492 && CONST_DOUBLE_LOW (x) < 0)
7493 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7494 else
7495 output_operand_lossage ("long long constant not a valid immediate operand");
7496 }
7497 else if (GET_CODE (x) == CONST_DOUBLE)
7498 output_operand_lossage ("floating point constant not a valid immediate operand");
7499 else { output_addr_const (file, x); }
7500 }
7501 \f
7502 /* Target hook for assembling integer objects. The sparc version has
7503 special handling for aligned DI-mode objects. */
7504
7505 static bool
7506 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7507 {
7508 /* ??? We only output .xword's for symbols and only then in environments
7509 where the assembler can handle them. */
7510 if (aligned_p && size == 8
7511 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7512 {
7513 if (TARGET_V9)
7514 {
7515 assemble_integer_with_op ("\t.xword\t", x);
7516 return true;
7517 }
7518 else
7519 {
7520 assemble_aligned_integer (4, const0_rtx);
7521 assemble_aligned_integer (4, x);
7522 return true;
7523 }
7524 }
7525 return default_assemble_integer (x, size, aligned_p);
7526 }
7527 \f
7528 /* Return the value of a code used in the .proc pseudo-op that says
7529 what kind of result this function returns. For non-C types, we pick
7530 the closest C type. */
7531
7532 #ifndef SHORT_TYPE_SIZE
7533 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7534 #endif
7535
7536 #ifndef INT_TYPE_SIZE
7537 #define INT_TYPE_SIZE BITS_PER_WORD
7538 #endif
7539
7540 #ifndef LONG_TYPE_SIZE
7541 #define LONG_TYPE_SIZE BITS_PER_WORD
7542 #endif
7543
7544 #ifndef LONG_LONG_TYPE_SIZE
7545 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7546 #endif
7547
7548 #ifndef FLOAT_TYPE_SIZE
7549 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7550 #endif
7551
7552 #ifndef DOUBLE_TYPE_SIZE
7553 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7554 #endif
7555
7556 #ifndef LONG_DOUBLE_TYPE_SIZE
7557 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7558 #endif
7559
7560 unsigned long
7561 sparc_type_code (register tree type)
7562 {
7563 register unsigned long qualifiers = 0;
7564 register unsigned shift;
7565
7566 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7567 setting more, since some assemblers will give an error for this. Also,
7568 we must be careful to avoid shifts of 32 bits or more to avoid getting
7569 unpredictable results. */
7570
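/* Illustrative example (assuming 8-bit chars): for "unsigned char **"
   the loop sees POINTER_TYPE at shift 6, POINTER_TYPE at shift 8, then
   the unsigned char INTEGER_TYPE, so the result is
   (1 << 6) | (1 << 8) | 12 == 0x14c.  */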
7571 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7572 {
7573 switch (TREE_CODE (type))
7574 {
7575 case ERROR_MARK:
7576 return qualifiers;
7577
7578 case ARRAY_TYPE:
7579 qualifiers |= (3 << shift);
7580 break;
7581
7582 case FUNCTION_TYPE:
7583 case METHOD_TYPE:
7584 qualifiers |= (2 << shift);
7585 break;
7586
7587 case POINTER_TYPE:
7588 case REFERENCE_TYPE:
7589 case OFFSET_TYPE:
7590 qualifiers |= (1 << shift);
7591 break;
7592
7593 case RECORD_TYPE:
7594 return (qualifiers | 8);
7595
7596 case UNION_TYPE:
7597 case QUAL_UNION_TYPE:
7598 return (qualifiers | 9);
7599
7600 case ENUMERAL_TYPE:
7601 return (qualifiers | 10);
7602
7603 case VOID_TYPE:
7604 return (qualifiers | 16);
7605
7606 case INTEGER_TYPE:
7607 /* If this is a range type, consider it to be the underlying
7608 type. */
7609 if (TREE_TYPE (type) != 0)
7610 break;
7611
7612 /* Carefully distinguish all the standard types of C,
7613 without messing up if the language is not C. We do this by
7614 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7615 look at both the names and the above fields, but that's redundant.
7616 Any type whose size is between two C types will be considered
7617 to be the wider of the two types. Also, we do not have a
7618 special code to use for "long long", so anything wider than
7619 long is treated the same. Note that we can't distinguish
7620 between "int" and "long" in this code if they are the same
7621 size, but that's fine, since neither can the assembler. */
7622
7623 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7624 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7625
7626 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7627 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7628
7629 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7630 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7631
7632 else
7633 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7634
7635 case REAL_TYPE:
7636 /* If this is a range type, consider it to be the underlying
7637 type. */
7638 if (TREE_TYPE (type) != 0)
7639 break;
7640
7641 /* Carefully distinguish all the standard types of C,
7642 without messing up if the language is not C. */
7643
7644 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7645 return (qualifiers | 6);
7646
7647 else
7648 return (qualifiers | 7);
7649
7650 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7651 /* ??? We need to distinguish between double and float complex types,
7652 but I don't know how yet because I can't reach this code from
7653 existing front-ends. */
7654 return (qualifiers | 7); /* Who knows? */
7655
7656 case VECTOR_TYPE:
7657 case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
7658 case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
7659 case FILE_TYPE: /* GNU Pascal FILE type. */
7660 case SET_TYPE: /* GNU Pascal SET type. */
7661 case LANG_TYPE: /* ? */
7662 return qualifiers;
7663
7664 default:
7665 abort (); /* Not a type! */
7666 }
7667 }
7668
7669 return qualifiers;
7670 }
7671 \f
7672 /* Nested function support. */
7673
7674 /* Emit RTL insns to initialize the variable parts of a trampoline.
7675 FNADDR is an RTX for the address of the function's pure code.
7676 CXT is an RTX for the static chain value for the function.
7677
7678 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7679 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7680 (to store insns). This is a bit excessive. Perhaps a different
7681 mechanism would be better here.
7682
7683 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7684
7685 void
7686 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7687 {
7688 /* SPARC 32-bit trampoline:
7689
7690 sethi %hi(fn), %g1
7691 sethi %hi(static), %g2
7692 jmp %g1+%lo(fn)
7693 or %g2, %lo(static), %g2
7694
7695 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7696 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7697 */
7698
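/* The magic constants below are the four instruction words with zero
   operand fields: 0x03000000 is "sethi 0, %g1", 0x05000000 is
   "sethi 0, %g2", 0x81c06000 is "jmpl %g1+0, %g0" and 0x8410a000 is
   "or %g2, 0, %g2".  The high 22 bits of each address (address >> 10)
   are OR'ed into the sethi imm22 field, and the low 10 bits
   (address & 0x3ff) into the simm13 field.  */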
7699 emit_move_insn
7700 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7701 expand_binop (SImode, ior_optab,
7702 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7703 size_int (10), 0, 1),
7704 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7705 NULL_RTX, 1, OPTAB_DIRECT));
7706
7707 emit_move_insn
7708 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7709 expand_binop (SImode, ior_optab,
7710 expand_shift (RSHIFT_EXPR, SImode, cxt,
7711 size_int (10), 0, 1),
7712 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7713 NULL_RTX, 1, OPTAB_DIRECT));
7714
7715 emit_move_insn
7716 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7717 expand_binop (SImode, ior_optab,
7718 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7719 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7720 NULL_RTX, 1, OPTAB_DIRECT));
7721
7722 emit_move_insn
7723 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7724 expand_binop (SImode, ior_optab,
7725 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7726 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7727 NULL_RTX, 1, OPTAB_DIRECT));
7728
7729 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7730 aligned on a 16 byte boundary so one flush clears it all. */
7731 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7732 if (sparc_cpu != PROCESSOR_ULTRASPARC
7733 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7734 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7735 plus_constant (tramp, 8)))));
7736
7737 /* Call __enable_execute_stack after writing onto the stack to make sure
7738 the stack address is accessible. */
7739 #ifdef ENABLE_EXECUTE_STACK
7740 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7741 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7742 #endif
7743
7744 }
7745
7746 /* The 64-bit version is simpler because it makes more sense to load the
7747 values as "immediate" data out of the trampoline. It's also easier since
7748 we can read the PC without clobbering a register. */
7749
7750 void
7751 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7752 {
7753 /* SPARC 64-bit trampoline:
7754
7755 rd %pc, %g1
7756 ldx [%g1+24], %g5
7757 jmp %g5
7758 ldx [%g1+16], %g5
7759 +16 bytes data
7760 */
7761
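/* The instruction words below decode as: 0x83414000 is "rd %pc, %g1",
   0xca586018 is "ldx [%g1+24], %g5", 0x81c14000 is "jmpl %g5+%g0, %g0"
   and 0xca586010 is "ldx [%g1+16], %g5".  No operand fields need
   patching because the target address and static chain are loaded as
   data from offsets 24 and 16.  */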
7762 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7763 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7764 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7765 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7766 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7767 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7768 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7769 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7770 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7771 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7772 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7773
7774 if (sparc_cpu != PROCESSOR_ULTRASPARC
7775 && sparc_cpu != PROCESSOR_ULTRASPARC3)
7776 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7777
7778 /* Call __enable_execute_stack after writing onto the stack to make sure
7779 the stack address is accessible. */
7780 #ifdef ENABLE_EXECUTE_STACK
7781 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7782 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7783 #endif
7784 }
7785 \f
7786 /* Adjust the cost of a scheduling dependency. Return the new cost of
7787 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7788
7789 static int
7790 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7791 {
7792 enum attr_type insn_type;
7793
7794 if (! recog_memoized (insn))
7795 return 0;
7796
7797 insn_type = get_attr_type (insn);
7798
7799 if (REG_NOTE_KIND (link) == 0)
7800 {
7801 /* Data dependency; DEP_INSN writes a register that INSN reads some
7802 cycles later. */
7803
7804 /* If a load, then the dependence must be on the memory address;
7805 add an extra "cycle". Note that the cost could be two cycles
7806 if the reg was written late in an instruction group; we cannot
7807 tell here. */
7808 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7809 return cost + 3;
7810
7811 /* Get the delay only if the address of the store is the dependence. */
7812 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7813 {
7814 rtx pat = PATTERN (insn);
7815 rtx dep_pat = PATTERN (dep_insn);
7816
7817 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7818 return cost; /* This should not happen! */
7819
7820 /* The dependency between the two instructions was on the data that
7821 is being stored. Assume that this implies that the address of the
7822 store is not dependent. */
7823 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7824 return cost;
7825
7826 return cost + 3; /* An approximation. */
7827 }
7828
7829 /* A shift instruction cannot receive its data from an instruction
7830 in the same cycle; add a one cycle penalty. */
7831 if (insn_type == TYPE_SHIFT)
7832 return cost + 3; /* Split before cascade into shift. */
7833 }
7834 else
7835 {
7836 /* Anti- or output-dependency; DEP_INSN reads/writes a register that
7837 INSN writes some cycles later. */
7838
7839 /* These are only significant for the fpu unit; writing a fp reg before
7840 the fpu has finished with it stalls the processor. */
7841
7842 /* Reusing an integer register causes no problems. */
7843 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7844 return 0;
7845 }
7846
7847 return cost;
7848 }
7849
7850 static int
7851 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7852 {
7853 enum attr_type insn_type, dep_type;
7854 rtx pat = PATTERN (insn);
7855 rtx dep_pat = PATTERN (dep_insn);
7856
7857 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7858 return cost;
7859
7860 insn_type = get_attr_type (insn);
7861 dep_type = get_attr_type (dep_insn);
7862
7863 switch (REG_NOTE_KIND (link))
7864 {
7865 case 0:
7866 /* Data dependency; DEP_INSN writes a register that INSN reads some
7867 cycles later. */
7868
7869 switch (insn_type)
7870 {
7871 case TYPE_STORE:
7872 case TYPE_FPSTORE:
7873 /* Get the delay iff the address of the store is the dependence. */
7874 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7875 return cost;
7876
7877 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7878 return cost;
7879 return cost + 3;
7880
7881 case TYPE_LOAD:
7882 case TYPE_SLOAD:
7883 case TYPE_FPLOAD:
7884 /* If a load, then the dependence must be on the memory address. If
7885 the addresses aren't equal, then it might be a false dependency. */
7886 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7887 {
7888 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7889 || GET_CODE (SET_DEST (dep_pat)) != MEM
7890 || GET_CODE (SET_SRC (pat)) != MEM
7891 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7892 XEXP (SET_SRC (pat), 0)))
7893 return cost + 2;
7894
7895 return cost + 8;
7896 }
7897 break;
7898
7899 case TYPE_BRANCH:
7900 /* Compare to branch latency is 0. There is no benefit from
7901 separating compare and branch. */
7902 if (dep_type == TYPE_COMPARE)
7903 return 0;
7904 /* Floating point compare to branch latency is less than
7905 compare to conditional move. */
7906 if (dep_type == TYPE_FPCMP)
7907 return cost - 1;
7908 break;
7909 default:
7910 break;
7911 }
7912 break;
7913
7914 case REG_DEP_ANTI:
7915 /* Anti-dependencies only penalize the fpu unit. */
7916 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7917 return 0;
7918 break;
7919
7920 default:
7921 break;
7922 }
7923
7924 return cost;
7925 }
7926
7927 static int
7928 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7929 {
7930 switch (sparc_cpu)
7931 {
7932 case PROCESSOR_SUPERSPARC:
7933 cost = supersparc_adjust_cost (insn, link, dep, cost);
7934 break;
7935 case PROCESSOR_HYPERSPARC:
7936 case PROCESSOR_SPARCLITE86X:
7937 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7938 break;
7939 default:
7940 break;
7941 }
7942 return cost;
7943 }
7944
7945 static void
7946 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7947 int sched_verbose ATTRIBUTE_UNUSED,
7948 int max_ready ATTRIBUTE_UNUSED)
7949 {
7950 }
7951
7952 static int
7953 sparc_use_sched_lookahead (void)
7954 {
7955 if (sparc_cpu == PROCESSOR_ULTRASPARC
7956 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7957 return 4;
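/* Membership test: use the CPU enum value as a bit index into a small
   set of processors, all of which get a lookahead of 3.  */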
7958 if ((1 << sparc_cpu) &
7959 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7960 (1 << PROCESSOR_SPARCLITE86X)))
7961 return 3;
7962 return 0;
7963 }
7964
7965 static int
7966 sparc_issue_rate (void)
7967 {
7968 switch (sparc_cpu)
7969 {
7970 default:
7971 return 1;
7972 case PROCESSOR_V9:
7973 /* Assume V9 processors are capable of at least dual-issue. */
7974 return 2;
7975 case PROCESSOR_SUPERSPARC:
7976 return 3;
7977 case PROCESSOR_HYPERSPARC:
7978 case PROCESSOR_SPARCLITE86X:
7979 return 2;
7980 case PROCESSOR_ULTRASPARC:
7981 case PROCESSOR_ULTRASPARC3:
7982 return 4;
7983 }
7984 }
7985
7986 static int
7987 set_extends (rtx insn)
7988 {
7989 register rtx pat = PATTERN (insn);
7990
7991 switch (GET_CODE (SET_SRC (pat)))
7992 {
7993 /* Load and some shift instructions zero extend. */
7994 case MEM:
7995 case ZERO_EXTEND:
7996 /* sethi clears the high bits. */
7997 case HIGH:
7998 /* LO_SUM is used with sethi; sethi cleared the high
7999 bits, and the values used with lo_sum are positive. */
8000 case LO_SUM:
8001 /* Store flag stores 0 or 1. */
8002 case LT: case LTU:
8003 case GT: case GTU:
8004 case LE: case LEU:
8005 case GE: case GEU:
8006 case EQ:
8007 case NE:
8008 return 1;
8009 case AND:
8010 {
8011 rtx op0 = XEXP (SET_SRC (pat), 0);
8012 rtx op1 = XEXP (SET_SRC (pat), 1);
8013 if (GET_CODE (op1) == CONST_INT)
8014 return INTVAL (op1) >= 0;
8015 if (GET_CODE (op0) != REG)
8016 return 0;
8017 if (sparc_check_64 (op0, insn) == 1)
8018 return 1;
8019 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8020 }
8021 case IOR:
8022 case XOR:
8023 {
8024 rtx op0 = XEXP (SET_SRC (pat), 0);
8025 rtx op1 = XEXP (SET_SRC (pat), 1);
8026 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8027 return 0;
8028 if (GET_CODE (op1) == CONST_INT)
8029 return INTVAL (op1) >= 0;
8030 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8031 }
8032 case LSHIFTRT:
8033 return GET_MODE (SET_SRC (pat)) == SImode;
8034 /* Positive integers leave the high bits zero. */
8035 case CONST_DOUBLE:
8036 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8037 case CONST_INT:
8038 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8039 case ASHIFTRT:
8040 case SIGN_EXTEND:
8041 return - (GET_MODE (SET_SRC (pat)) == SImode);
8042 case REG:
8043 return sparc_check_64 (SET_SRC (pat), insn);
8044 default:
8045 return 0;
8046 }
8047 }
8048
8049 /* We _ought_ to have only one kind per function, but... */
8050 static GTY(()) rtx sparc_addr_diff_list;
8051 static GTY(()) rtx sparc_addr_list;
8052
8053 void
8054 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8055 {
8056 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8057 if (diff)
8058 sparc_addr_diff_list
8059 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8060 else
8061 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8062 }
8063
8064 static void
8065 sparc_output_addr_vec (rtx vec)
8066 {
8067 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8068 int idx, vlen = XVECLEN (body, 0);
8069
8070 #ifdef ASM_OUTPUT_ADDR_VEC_START
8071 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8072 #endif
8073
8074 #ifdef ASM_OUTPUT_CASE_LABEL
8075 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8076 NEXT_INSN (lab));
8077 #else
8078 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8079 #endif
8080
8081 for (idx = 0; idx < vlen; idx++)
8082 {
8083 ASM_OUTPUT_ADDR_VEC_ELT
8084 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8085 }
8086
8087 #ifdef ASM_OUTPUT_ADDR_VEC_END
8088 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8089 #endif
8090 }
8091
8092 static void
8093 sparc_output_addr_diff_vec (rtx vec)
8094 {
8095 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8096 rtx base = XEXP (XEXP (body, 0), 0);
8097 int idx, vlen = XVECLEN (body, 1);
8098
8099 #ifdef ASM_OUTPUT_ADDR_VEC_START
8100 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8101 #endif
8102
8103 #ifdef ASM_OUTPUT_CASE_LABEL
8104 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8105 NEXT_INSN (lab));
8106 #else
8107 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8108 #endif
8109
8110 for (idx = 0; idx < vlen; idx++)
8111 {
8112 ASM_OUTPUT_ADDR_DIFF_ELT
8113 (asm_out_file,
8114 body,
8115 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8116 CODE_LABEL_NUMBER (base));
8117 }
8118
8119 #ifdef ASM_OUTPUT_ADDR_VEC_END
8120 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8121 #endif
8122 }
8123
8124 static void
8125 sparc_output_deferred_case_vectors (void)
8126 {
8127 rtx t;
8128 int align;
8129
8130 if (sparc_addr_list == NULL_RTX
8131 && sparc_addr_diff_list == NULL_RTX)
8132 return;
8133
8134 /* Align to cache line in the function's code section. */
8135 function_section (current_function_decl);
8136
8137 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8138 if (align > 0)
8139 ASM_OUTPUT_ALIGN (asm_out_file, align);
8140
8141 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8142 sparc_output_addr_vec (XEXP (t, 0));
8143 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8144 sparc_output_addr_diff_vec (XEXP (t, 0));
8145
8146 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8147 }
8148
8149 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8150 unknown. Return 1 if the high bits are zero, -1 if the register is
8151 sign extended. */
8152 int
8153 sparc_check_64 (rtx x, rtx insn)
8154 {
8155 /* If a register is set only once, it is safe to ignore insns this
8156 code does not know how to handle. The loop will either recognize
8157 the single set and return the correct value or fail to recognize
8158 it and return 0. */
8159 int set_once = 0;
8160 rtx y = x;
8161
8162 if (GET_CODE (x) != REG)
8163 abort ();
8164
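/* If X is a DImode register pair, inspect the SImode register holding
   the low word; with big-endian word ordering that is the
   higher-numbered register, hence the + WORDS_BIG_ENDIAN.  */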
8165 if (GET_MODE (x) == DImode)
8166 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8167
8168 if (flag_expensive_optimizations
8169 && REG_N_SETS (REGNO (y)) == 1)
8170 set_once = 1;
8171
8172 if (insn == 0)
8173 {
8174 if (set_once)
8175 insn = get_last_insn_anywhere ();
8176 else
8177 return 0;
8178 }
8179
8180 while ((insn = PREV_INSN (insn)))
8181 {
8182 switch (GET_CODE (insn))
8183 {
8184 case JUMP_INSN:
8185 case NOTE:
8186 break;
8187 case CODE_LABEL:
8188 case CALL_INSN:
8189 default:
8190 if (! set_once)
8191 return 0;
8192 break;
8193 case INSN:
8194 {
8195 rtx pat = PATTERN (insn);
8196 if (GET_CODE (pat) != SET)
8197 return 0;
8198 if (rtx_equal_p (x, SET_DEST (pat)))
8199 return set_extends (insn);
8200 if (y && rtx_equal_p (y, SET_DEST (pat)))
8201 return set_extends (insn);
8202 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8203 return 0;
8204 }
8205 }
8206 }
8207 return 0;
8208 }
8209
8210 /* Returns assembly code to perform a DImode shift using
8211 a 64-bit global or out register on SPARC-V8+. */
8212 const char *
8213 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8214 {
8215 static char asm_code[60];
8216
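/* Strategy: merge the two 32-bit halves of the source into a single
   64-bit register, perform one 64-bit shift, then split the result
   back into high and low halves with srlx.  */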
8217 /* The scratch register is only required when the destination
8218 register is not a 64-bit global or out register. */
8219 if (which_alternative != 2)
8220 operands[3] = operands[0];
8221
8222 /* We can only shift by constants <= 63. */
8223 if (GET_CODE (operands[2]) == CONST_INT)
8224 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8225
8226 if (GET_CODE (operands[1]) == CONST_INT)
8227 {
8228 output_asm_insn ("mov\t%1, %3", operands);
8229 }
8230 else
8231 {
8232 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8233 if (sparc_check_64 (operands[1], insn) <= 0)
8234 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8235 output_asm_insn ("or\t%L1, %3, %3", operands);
8236 }
8237
8238 strcpy (asm_code, opcode);
8239
8240 if (which_alternative != 2)
8241 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8242 else
8243 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8244 }
8245 \f
8246 /* Output rtl to increment the profiler label LABELNO
8247 for profiling a function entry. */
8248
8249 void
8250 sparc_profile_hook (int labelno)
8251 {
8252 char buf[32];
8253 rtx lab, fun;
8254
8255 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8256 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8257 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8258
8259 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8260 }
8261 \f
8262 #ifdef OBJECT_FORMAT_ELF
8263 static void
8264 sparc_elf_asm_named_section (const char *name, unsigned int flags,
8265 tree decl)
8266 {
8267 if (flags & SECTION_MERGE)
8268 {
8269 /* The entry size (entsize) cannot be expressed in this
8270 section-attribute encoding style. */
8271 default_elf_asm_named_section (name, flags, decl);
8272 return;
8273 }
8274
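/* Emit the Sun-style attribute syntax by hand; e.g. a writable data
   section comes out as: .section ".data",#alloc,#write  */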
8275 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8276
8277 if (!(flags & SECTION_DEBUG))
8278 fputs (",#alloc", asm_out_file);
8279 if (flags & SECTION_WRITE)
8280 fputs (",#write", asm_out_file);
8281 if (flags & SECTION_TLS)
8282 fputs (",#tls", asm_out_file);
8283 if (flags & SECTION_CODE)
8284 fputs (",#execinstr", asm_out_file);
8285
8286 /* ??? Handle SECTION_BSS. */
8287
8288 fputc ('\n', asm_out_file);
8289 }
8290 #endif /* OBJECT_FORMAT_ELF */
8291
8292 /* We do not allow indirect calls to be optimized into sibling calls.
8293
8294 We cannot use sibling calls when delayed branches are disabled
8295 because they will likely require the call delay slot to be filled.
8296
8297 Also, on SPARC 32-bit we cannot emit a sibling call when the
8298 current function returns a structure. This is because the "unimp
8299 after call" convention would cause the callee to return to the
8300 wrong place. The generic code already disallows cases where the
8301 function being called returns a structure.
8302
8303 It may seem strange how this last case could occur. Usually there
8304 is code after the call which jumps to epilogue code which dumps the
8305 return value into the struct return area. That ought to invalidate
8306 the sibling call, right? Well, in the C++ case we can end up passing
8307 the pointer to the struct return area to a constructor (which returns
8308 void) and then nothing else happens. Such a sibling call would look
8309 valid without the added check here. */
8310 static bool
8311 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8312 {
8313 return (decl
8314 && flag_delayed_branch
8315 && (TARGET_ARCH64 || ! current_function_returns_struct));
8316 }
8317 \f
8318 /* libfunc renaming. */
8319 #include "config/gofast.h"
8320
8321 static void
8322 sparc_init_libfuncs (void)
8323 {
8324 if (TARGET_ARCH32)
8325 {
8326 /* Use the subroutines that Sun's library provides for integer
8327 multiply and divide. The `*' prevents an underscore from
8328 being prepended by the compiler. .umul is a little faster
8329 than .mul. */
8330 set_optab_libfunc (smul_optab, SImode, "*.umul");
8331 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8332 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8333 set_optab_libfunc (smod_optab, SImode, "*.rem");
8334 set_optab_libfunc (umod_optab, SImode, "*.urem");
8335
8336 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8337 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8338 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8339 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8340 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8341 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8342
8343 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8344 is because with soft-float, the SFmode and DFmode sqrt
8345 instructions will be absent, and the compiler will notice and
8346 try to use the TFmode sqrt instruction for calls to the
8347 builtin function sqrt, but this fails. */
8348 if (TARGET_FPU)
8349 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8350
8351 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8352 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8353 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8354 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8355 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8356 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8357
8358 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8359 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8360 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8361 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8362
8363 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8364 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8365 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8366
8367 if (DITF_CONVERSION_LIBFUNCS)
8368 {
8369 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8370 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8371 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8372 }
8373
8374 if (SUN_CONVERSION_LIBFUNCS)
8375 {
8376 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8377 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8378 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8379 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8380 }
8381 }
8382 if (TARGET_ARCH64)
8383 {
8384 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8385 do not exist in the library. Make sure the compiler does not
8386 emit calls to them by accident. (It should always use the
8387 hardware instructions.) */
8388 set_optab_libfunc (smul_optab, SImode, 0);
8389 set_optab_libfunc (sdiv_optab, SImode, 0);
8390 set_optab_libfunc (udiv_optab, SImode, 0);
8391 set_optab_libfunc (smod_optab, SImode, 0);
8392 set_optab_libfunc (umod_optab, SImode, 0);
8393
8394 if (SUN_INTEGER_MULTIPLY_64)
8395 {
8396 set_optab_libfunc (smul_optab, DImode, "__mul64");
8397 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8398 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8399 set_optab_libfunc (smod_optab, DImode, "__rem64");
8400 set_optab_libfunc (umod_optab, DImode, "__urem64");
8401 }
8402
8403 if (SUN_CONVERSION_LIBFUNCS)
8404 {
8405 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8406 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8407 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8408 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8409 }
8410 }
8411
8412 gofast_maybe_init_libfuncs ();
8413 }
8414 \f
8415 int
8416 sparc_extra_constraint_check (rtx op, int c, int strict)
8417 {
8418 int reload_ok_mem;
8419
8420 if (TARGET_ARCH64
8421 && (c == 'T' || c == 'U'))
8422 return 0;
8423
8424 switch (c)
8425 {
8426 case 'Q':
8427 return fp_sethi_p (op);
8428
8429 case 'R':
8430 return fp_mov_p (op);
8431
8432 case 'S':
8433 return fp_high_losum_p (op);
8434
8435 case 'U':
8436 if (! strict
8437 || (GET_CODE (op) == REG
8438 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8439 || reg_renumber[REGNO (op)] >= 0)))
8440 return register_ok_for_ldd (op);
8441
8442 return 0;
8443
8444 case 'W':
8445 case 'T':
8446 break;
8447
8448 default:
8449 return 0;
8450 }
8451
8452 /* Our memory extra constraints have to emulate the
8453 behavior of 'm' and 'o' in order for reload to work
8454 correctly. */
8455 if (GET_CODE (op) == MEM)
8456 {
8457 reload_ok_mem = 0;
8458 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8459 && (! strict
8460 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8461 reload_ok_mem = 1;
8462 }
8463 else
8464 {
8465 reload_ok_mem = (reload_in_progress
8466 && GET_CODE (op) == REG
8467 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8468 && reg_renumber [REGNO (op)] < 0);
8469 }
8470
8471 return reload_ok_mem;
8472 }
8473
8474 /* ??? This duplicates information provided to the compiler by the
8475 ??? scheduler description. Some day, teach genautomata to output
8476 ??? the latencies and then CSE will just use that. */
8477
8478 static bool
8479 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8480 {
8481 enum machine_mode mode = GET_MODE (x);
8482 bool float_mode_p = FLOAT_MODE_P (mode);
8483
8484 switch (code)
8485 {
8486 case CONST_INT:
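/* Constants that fit in the signed 13-bit immediate field of most
   SPARC instructions are free.  */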
8487 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8488 {
8489 *total = 0;
8490 return true;
8491 }
8492 /* FALLTHRU */
8493
8494 case HIGH:
8495 *total = 2;
8496 return true;
8497
8498 case CONST:
8499 case LABEL_REF:
8500 case SYMBOL_REF:
8501 *total = 4;
8502 return true;
8503
8504 case CONST_DOUBLE:
8505 if (GET_MODE (x) == DImode
8506 && ((XINT (x, 3) == 0
8507 && (unsigned HOST_WIDE_INT) XINT (x, 2) < 0x1000)
8508 || (XINT (x, 3) == -1
8509 && XINT (x, 2) < 0
8510 && XINT (x, 2) >= -0x1000)))
8511 *total = 0;
8512 else
8513 *total = 8;
8514 return true;
8515
8516 case MEM:
8517 /* If outer-code was a sign or zero extension, a cost
8518 of COSTS_N_INSNS (1) was already added in. This is
8519 why we are subtracting it back out. */
8520 if (outer_code == ZERO_EXTEND)
8521 {
8522 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8523 }
8524 else if (outer_code == SIGN_EXTEND)
8525 {
8526 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8527 }
8528 else if (float_mode_p)
8529 {
8530 *total = sparc_costs->float_load;
8531 }
8532 else
8533 {
8534 *total = sparc_costs->int_load;
8535 }
8536
8537 return true;
8538
8539 case PLUS:
8540 case MINUS:
8541 if (float_mode_p)
8542 *total = sparc_costs->float_plusminus;
8543 else
8544 *total = COSTS_N_INSNS (1);
8545 return false;
8546
8547 case MULT:
8548 if (float_mode_p)
8549 *total = sparc_costs->float_mul;
8550 else if (! TARGET_HARD_MUL)
8551 *total = COSTS_N_INSNS (25);
8552 else
8553 {
8554 int bit_cost;
8555
8556 bit_cost = 0;
8557 if (sparc_costs->int_mul_bit_factor)
8558 {
8559 int nbits;
8560
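/* Count the set bits in the multiplier; each iteration of
   "value &= value - 1" clears the lowest set bit.  The cost model
   below scales with this population count.  */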
8561 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8562 {
8563 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
8564 for (nbits = 0; value != 0; value &= value - 1)
8565 nbits++;
8566 }
8567 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8568 && GET_MODE (XEXP (x, 1)) == DImode)
8569 {
8570 rtx x1 = XEXP (x, 1);
8571 unsigned HOST_WIDE_INT value1 = XINT (x1, 2);
8572 unsigned HOST_WIDE_INT value2 = XINT (x1, 3);
8573
8574 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8575 nbits++;
8576 for (; value2 != 0; value2 &= value2 - 1)
8577 nbits++;
8578 }
8579 else
8580 nbits = 7;
8581
8582 if (nbits < 3)
8583 nbits = 3;
8584 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8585 bit_cost = COSTS_N_INSNS (bit_cost);
8586 }
8587
8588 if (mode == DImode)
8589 *total = sparc_costs->int_mulX + bit_cost;
8590 else
8591 *total = sparc_costs->int_mul + bit_cost;
8592 }
8593 return false;
8594
8595 case ASHIFT:
8596 case ASHIFTRT:
8597 case LSHIFTRT:
8598 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8599 return false;
8600
8601 case DIV:
8602 case UDIV:
8603 case MOD:
8604 case UMOD:
8605 if (float_mode_p)
8606 {
8607 if (mode == DFmode)
8608 *total = sparc_costs->float_div_df;
8609 else
8610 *total = sparc_costs->float_div_sf;
8611 }
8612 else
8613 {
8614 if (mode == DImode)
8615 *total = sparc_costs->int_divX;
8616 else
8617 *total = sparc_costs->int_div;
8618 }
8619 return false;
8620
8621 case NEG:
8622 if (! float_mode_p)
8623 {
8624 *total = COSTS_N_INSNS (1);
8625 return false;
8626 }
8627 /* FALLTHRU */
8628
8629 case ABS:
8630 case FLOAT:
8631 case UNSIGNED_FLOAT:
8632 case FIX:
8633 case UNSIGNED_FIX:
8634 case FLOAT_EXTEND:
8635 case FLOAT_TRUNCATE:
8636 *total = sparc_costs->float_move;
8637 return false;
8638
8639 case SQRT:
8640 if (mode == DFmode)
8641 *total = sparc_costs->float_sqrt_df;
8642 else
8643 *total = sparc_costs->float_sqrt_sf;
8644 return false;
8645
8646 case COMPARE:
8647 if (float_mode_p)
8648 *total = sparc_costs->float_cmp;
8649 else
8650 *total = COSTS_N_INSNS (1);
8651 return false;
8652
8653 case IF_THEN_ELSE:
8654 if (float_mode_p)
8655 *total = sparc_costs->float_cmove;
8656 else
8657 *total = sparc_costs->int_cmove;
8658 return false;
8659
8660 default:
8661 return false;
8662 }
8663 }
8664
8665 /* Emit the sequence of insns SEQ while preserving the register REG. */
8666
8667 static void
8668 emit_and_preserve (rtx seq, rtx reg)
8669 {
8670 rtx slot = gen_rtx_MEM (word_mode,
8671 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8672
8673 emit_stack_pointer_decrement (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT));
8674 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8675 emit_insn (seq);
8676 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8677 emit_stack_pointer_increment (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT));
8678 }
8679
8680 /* Output the assembler code for a thunk function. THUNK_DECL is the
8681 declaration for the thunk function itself, FUNCTION is the decl for
8682 the target function. DELTA is an immediate constant offset to be
8683 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8684 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8685
8686 static void
8687 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8688 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8689 tree function)
8690 {
8691 rtx this, insn, funexp;
8692 unsigned int int_arg_first;
8693
8694 reload_completed = 1;
8695 epilogue_completed = 1;
8696 no_new_pseudos = 1;
8697 reset_block_changes ();
8698
8699 emit_note (NOTE_INSN_PROLOGUE_END);
8700
8701 if (flag_delayed_branch)
8702 {
8703 /* We will emit a regular sibcall below, so we need to instruct
8704 output_sibcall that we are in a leaf function. */
8705 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8706
8707 /* This will cause final.c to invoke leaf_renumber_regs so we
8708 must behave as if we were in a not-yet-leafified function. */
8709 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8710 }
8711 else
8712 {
8713 /* We will emit the sibcall manually below, so we will need to
8714 manually spill non-leaf registers. */
8715 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8716
8717 /* We really are in a leaf function. */
8718 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8719 }
8720
8721 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
8722 returns a structure, the structure return pointer is there instead. */
8723 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8724 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8725 else
8726 this = gen_rtx_REG (Pmode, int_arg_first);
8727
8728 /* Add DELTA. When possible use a plain add, otherwise load it into
8729 a register first. */
8730 if (delta)
8731 {
8732 rtx delta_rtx = GEN_INT (delta);
8733
8734 if (! SPARC_SIMM13_P (delta))
8735 {
8736 rtx scratch = gen_rtx_REG (Pmode, 1);
8737 emit_move_insn (scratch, delta_rtx);
8738 delta_rtx = scratch;
8739 }
8740
8741 /* THIS += DELTA. */
8742 emit_insn (gen_add2_insn (this, delta_rtx));
8743 }
8744
8745 /* Add the word at address (*THIS + VCALL_OFFSET). */
8746 if (vcall_offset)
8747 {
8748 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8749 rtx scratch = gen_rtx_REG (Pmode, 1);
8750
8751 if (vcall_offset >= 0)
8752 abort ();
8753
8754 /* SCRATCH = *THIS. */
8755 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8756
8757 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8758 may not have any available scratch register at this point. */
8759 if (SPARC_SIMM13_P (vcall_offset))
8760 ;
8761 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8762 else if (! fixed_regs[5]
8763 /* The below sequence is made up of at least 2 insns,
8764 while the default method may need only one. */
8765 && vcall_offset < -8192)
8766 {
8767 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8768 emit_move_insn (scratch2, vcall_offset_rtx);
8769 vcall_offset_rtx = scratch2;
8770 }
8771 else
8772 {
8773 rtx increment = GEN_INT (-4096);
8774
8775 /* VCALL_OFFSET is a negative number whose typical range can be
8776 estimated as -32768..0 in 32-bit mode. In almost all cases
8777 it is therefore cheaper to emit multiple add insns than
8778 spilling and loading the constant into a register (at least
8779 6 insns). */
8780 while (! SPARC_SIMM13_P (vcall_offset))
8781 {
8782 emit_insn (gen_add2_insn (scratch, increment));
8783 vcall_offset += 4096;
8784 }
8785 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8786 }
8787
8788 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8789 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8790 gen_rtx_PLUS (Pmode,
8791 scratch,
8792 vcall_offset_rtx)));
8793
8794 /* THIS += *(*THIS + VCALL_OFFSET). */
8795 emit_insn (gen_add2_insn (this, scratch));
8796 }
8797
8798 /* Generate a tail call to the target function. */
8799 if (! TREE_USED (function))
8800 {
8801 assemble_external (function);
8802 TREE_USED (function) = 1;
8803 }
8804 funexp = XEXP (DECL_RTL (function), 0);
8805
8806 if (flag_delayed_branch)
8807 {
8808 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8809 insn = emit_call_insn (gen_sibcall (funexp));
8810 SIBLING_CALL_P (insn) = 1;
8811 }
8812 else
8813 {
8814 /* The hoops we have to jump through in order to generate a sibcall
8815 without using delay slots... */
8816 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
8817
8818 if (flag_pic)
8819 {
8820 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8821 start_sequence ();
8822 load_pic_register (); /* clobbers %o7 */
8823 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8824 seq = get_insns ();
8825 end_sequence ();
8826 emit_and_preserve (seq, spill_reg);
8827 }
8828 else if (TARGET_ARCH32)
8829 {
8830 emit_insn (gen_rtx_SET (VOIDmode,
8831 scratch,
8832 gen_rtx_HIGH (SImode, funexp)));
8833 emit_insn (gen_rtx_SET (VOIDmode,
8834 scratch,
8835 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8836 }
8837 else /* TARGET_ARCH64 */
8838 {
8839 switch (sparc_cmodel)
8840 {
8841 case CM_MEDLOW:
8842 case CM_MEDMID:
8843 /* The destination can serve as a temporary. */
8844 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8845 break;
8846
8847 case CM_MEDANY:
8848 case CM_EMBMEDANY:
8849 /* The destination cannot serve as a temporary. */
8850 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8851 start_sequence ();
8852 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8853 seq = get_insns ();
8854 end_sequence ();
8855 emit_and_preserve (seq, spill_reg);
8856 break;
8857
8858 default:
8859 abort ();
8860 }
8861 }
8862
8863 emit_jump_insn (gen_indirect_jump (scratch));
8864 }
8865
8866 emit_barrier ();
8867
8868 /* Run just enough of rest_of_compilation to get the insns emitted.
8869 There's not really enough bulk here to make other passes such as
8870 instruction scheduling worth while. Note that use_thunk calls
8871 assemble_start_function and assemble_end_function. */
8872 insn = get_insns ();
8873 insn_locators_initialize ();
8874 shorten_branches (insn);
8875 final_start_function (insn, file, 1);
8876 final (insn, file, 1, 0);
8877 final_end_function ();
8878
8879 reload_completed = 0;
8880 epilogue_completed = 0;
8881 no_new_pseudos = 0;
8882 }
8883
8884 /* Return true if sparc_output_mi_thunk would be able to output the
8885 assembler code for the thunk function specified by the arguments
8886 it is passed, and false otherwise. */
8887 static bool
8888 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8889 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8890 HOST_WIDE_INT vcall_offset,
8891 tree function ATTRIBUTE_UNUSED)
8892 {
8893 /* Bound the loop used in the default method above. */
8894 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8895 }
8896
8897 /* How to allocate a 'struct machine_function'. */
8898
8899 static struct machine_function *
8900 sparc_init_machine_status (void)
8901 {
8902 return ggc_alloc_cleared (sizeof (struct machine_function));
8903 }
8904
8905 /* Locate some local-dynamic symbol still in use by this function
8906 so that we can print its name in local-dynamic base patterns. */
8907
8908 static const char *
8909 get_some_local_dynamic_name (void)
8910 {
8911 rtx insn;
8912
8913 if (cfun->machine->some_ld_name)
8914 return cfun->machine->some_ld_name;
8915
8916 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8917 if (INSN_P (insn)
8918 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8919 return cfun->machine->some_ld_name;
8920
8921 abort ();
8922 }
8923
8924 static int
8925 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8926 {
8927 rtx x = *px;
8928
8929 if (x
8930 && GET_CODE (x) == SYMBOL_REF
8931 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8932 {
8933 cfun->machine->some_ld_name = XSTR (x, 0);
8934 return 1;
8935 }
8936
8937 return 0;
8938 }
8939
8940 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
8941 We need to emit DTP-relative relocations. */
8942
8943 void
8944 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8945 {
8946 switch (size)
8947 {
8948 case 4:
8949 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8950 break;
8951 case 8:
8952 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8953 break;
8954 default:
8955 abort ();
8956 }
8957 output_addr_const (file, x);
8958 fputs (")", file);
8959 }
8960
8961 #include "gt-sparc.h"