Sun Niagara specific optimizations.
gcc/config/sparc/sparc.c
/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"

/* Processor costs */
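/* COSTS_N_INSNS (N) (from rtl.h, where it expands to N * 4) scales N
   so that COSTS_N_INSNS (1) is the cost of one simple instruction;
   the entries below are thus in instruction equivalents rather than
   raw cycle counts.  */
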
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
   whether anything branches between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif
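
/* ??? Reading of the macro name, inferred from its uses elsewhere in
   this file: whether the %o7 slot must be treated as reserved (kept
   live across the call) when a leaf function makes a sibling call,
   which forces the save/restore sequence described above.  */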

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) int struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
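
/* To make the remap concrete: in a leaf function the incoming registers
   %i0-%i5 (hard regs 24-29) are renamed to the caller's %o0-%o5 (hard
   regs 8-13), %i7 (31) to %o7 (15), and %sp (14) stays put, so
   leaf_reg_remap[24] == 8 means "%i0 becomes %o0".  A -1 entry marks a
   register that may not appear in a leaf function at all.  */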

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_fundamental_type (tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch	name,		tune	arch */
  { (char *)0,	"default",	1,	1 },
  { (char *)0,	"-mcpu=",	1,	1 },
  { (char *)0,	"-mtune=",	1,	0 },
  { 0, 0, 0, 0 }
};
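
/* How this table is consumed (see sparc_override_options below): entry 0
   carries the TARGET_CPU_DEFAULT name and sets both tune and arch; -mcpu=
   likewise sets both; -mtune= changes only the scheduling model.  So
   "-mcpu=v8 -mtune=ultrasparc" generates V8 code scheduled for
   UltraSPARC.  */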

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7", PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.
       The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x", PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
      /* Although insns using %y are deprecated, it is a clear win on current
	 ultrasparcs.  */
      |MASK_DEPRECATED_V8_INSNS },
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS },
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
	   DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64-bit archs to use a 128-bit long double.  */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
	{
	  for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
	    if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
	      break;
	  if (cmodel->name == NULL)
	    error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
	  else
	    sparc_cmodel = cmodel->value;
	}
      else
	error ("-mcmodel= is not supported on 32-bit systems");
    }

  fpu = TARGET_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
	{
	  for (cpu = &cpu_table[0]; cpu->name; ++cpu)
	    if (! strcmp (sel->string, cpu->name))
	      {
		if (sel->set_tune_p)
		  sparc_cpu = cpu->processor;

		if (sel->set_arch_p)
		  {
		    target_flags &= ~cpu->disable;
		    target_flags |= cpu->enable;
		  }
		break;
	      }

	  if (! cpu->name)
	    error ("bad value (%s) for %s switch", sel->string, sel->name);
	}
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32-bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9; it makes no sense in 64-bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32-bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
	  || sparc_cpu == PROCESSOR_ULTRASPARC3
	  || sparc_cpu == PROCESSOR_NIAGARA))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL }
};
#endif
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
	  || code == LE || code == GT);
}

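/* Background for the three predicates below: a 32-bit value fits in an
   integer register in a single instruction only if it fits the signed
   13-bit immediate of "mov" (SPARC_SIMM13_P, i.e. -4096..4095), or has
   its low 10 bits clear so that one "sethi", which sets the top 22
   bits, suffices (SPARC_SETHI_P).  Anything else takes the two-insn
   sethi/or pair.  */
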
/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
	return false;

      if (!reload_in_progress)
	{
	  operands[0] = validize_mem (operands[0]);
	  operands[1] = force_reg (mode, operands[1]);
	}
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands[1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (sym, 0), 1);
	  sym = XEXP (XEXP (sym, 0), 0);
	}

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
	{
	  sym = gen_rtx_PLUS (mode, sym, addend);
	  sym = force_operand (sym, operands[0]);
	}
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
	operands[1] = legitimize_pic_address (operands[1], mode, 0);

      if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
	{
	  emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
	{
	  gcc_assert (TARGET_ARCH64);
	  emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
	  return true;
	}

      if (symbolic_operand (operands[1], mode))
	{
	  operands[1] = legitimize_pic_address (operands[1],
						mode,
						(reload_in_progress ?
						 operands[0] :
						 NULL_RTX));
	  return false;
	}
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
	  || SCALAR_FLOAT_MODE_P (mode)
	  || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
	 not storing directly into memory.  So fix this up to avoid
	 crashes in output_constant_pool.  */
      if (operands[1] == const0_rtx)
	operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
	  && const_zero_operand (operands[1], mode))
	return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
	  /* We are able to build any SF constant in integer registers
	     with at most 2 instructions.  */
	  && (mode == SFmode
	      /* And any DF constant in integer registers.  */
	      || (mode == DFmode
		  && (reload_completed || reload_in_progress))))
	return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
	operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}
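
/* A typical caller is a move expander in sparc.md; sketched here (the
   exact pattern text lives in the .md file, not in this one):

     (define_expand "movsi" ...
       "if (sparc_expand_move (SImode, operands)) DONE;")

   i.e. a true return means the expander has already emitted all the
   insns itself.  */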

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

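/* For instance (illustrative numbers): loading 0x12345678 becomes

     sethi %hi(0x12345678), %tmp   ! %tmp = 0x12345400 (top 22 bits)
     or    %tmp, 0x278, %reg       ! add back the low 10 bits

   since 0x12345678 & 0x3ff == 0x278.  */
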
void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
		  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
	 this way CSE can see everything and reuse intermediate
	 values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      GEN_INT (INTVAL (op1)
				       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      op0,
			      gen_rtx_IOR (mode, temp,
					   GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
			      gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
			      op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 4TB of the virtual address
	 space.

	 sethi	%hi(symbol), %temp1
	 or	%temp1, %lo(symbol), %reg  */
      if (temp)
	temp1 = temp;  /* op0 is allowed.  */
      else
	temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable must be in the low 16TB of the virtual address
	 space.

	 sethi	%h44(symbol), %temp1
	 or	%temp1, %m44(symbol), %temp2
	 sllx	%temp2, 12, %temp3
	 or	%temp3, %l44(symbol), %reg  */
      if (temp)
	{
	  temp1 = op0;
	  temp2 = op0;
	  temp3 = temp;  /* op0 is allowed.  */
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
			      gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
	 than 2^31 bytes (2GB) and the distance from any instruction
	 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
	 than 2^31 bytes (2GB).

	 The executable can be placed anywhere in the virtual address
	 space.

	 sethi	%hh(symbol), %temp1
	 sethi	%lm(symbol), %temp2
	 or	%temp1, %hm(symbol), %temp3
	 sllx	%temp3, 32, %temp4
	 or	%temp4, %temp2, %temp5
	 or	%temp5, %lo(symbol), %reg  */
      if (temp)
	{
	  /* It is possible that one of the registers we got for operands[2]
	     might coincide with that of operands[0] (which is why we made
	     it TImode).  Pick the other one to use as our scratch.  */
	  if (rtx_equal_p (temp, op0))
	    {
	      gcc_assert (ti_temp);
	      temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
	    }
	  temp1 = op0;
	  temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	  temp3 = op0;
	  temp4 = op0;
	  temp5 = op0;
	}
      else
	{
	  temp1 = gen_reg_rtx (DImode);
	  temp2 = gen_reg_rtx (DImode);
	  temp3 = gen_reg_rtx (DImode);
	  temp4 = gen_reg_rtx (DImode);
	  temp5 = gen_reg_rtx (DImode);
	}

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
			      gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
	 Essentially it is MEDLOW with a fixed 64-bit
	 virtual base added to all data segment addresses.
	 Text-segment stuff is computed like MEDANY, we can't
	 reuse the code above because the relocation knobs
	 look different.

	 Data segment:	sethi	%hi(symbol), %temp1
			add	%temp1, EMBMEDANY_BASE_REG, %temp2
			or	%temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
	{
	  if (temp)
	    {
	      temp1 = temp;  /* op0 is allowed.  */
	      temp2 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_sethi (temp1, op1));
	  emit_insn (gen_embmedany_brsum (temp2, temp1));
	  emit_insn (gen_embmedany_losum (op0, temp2, op1));
	}

      /* Text segment:	sethi	%uhi(symbol), %temp1
			sethi	%hi(symbol), %temp2
			or	%temp1, %ulo(symbol), %temp3
			sllx	%temp3, 32, %temp4
			or	%temp4, %temp2, %temp5
			or	%temp5, %lo(symbol), %reg  */
      else
	{
	  if (temp)
	    {
	      /* It is possible that one of the registers we got for operands[2]
		 might coincide with that of operands[0] (which is why we made
		 it TImode).  Pick the other one to use as our scratch.  */
	      if (rtx_equal_p (temp, op0))
		{
		  gcc_assert (ti_temp);
		  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
		}
	      temp1 = op0;
	      temp2 = temp;  /* op0 is _not_ allowed, see above.  */
	      temp3 = op0;
	      temp4 = op0;
	      temp5 = op0;
	    }
	  else
	    {
	      temp1 = gen_reg_rtx (DImode);
	      temp2 = gen_reg_rtx (DImode);
	      temp3 = gen_reg_rtx (DImode);
	      temp4 = gen_reg_rtx (DImode);
	      temp5 = gen_reg_rtx (DImode);
	    }

	  emit_insn (gen_embmedany_textuhi (temp1, op1));
	  emit_insn (gen_embmedany_texthi (temp2, op1));
	  emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
	  emit_insn (gen_rtx_SET (VOIDmode, temp4,
				  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
	  emit_insn (gen_rtx_SET (VOIDmode, temp5,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	  emit_insn (gen_embmedany_textlo (op0, temp5, op1));
	}
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH, they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits so the result
   matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

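/* E.g. (illustrative): gen_safe_HIGH64 (reg, 0x12345678) emits
   "reg = 0x12345400" rather than (set reg (high ...)), exposing the
   exact bit pattern (the top 22 bits) to CSE.  */
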
static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_safe_XOR64 (temp,
						  (-(HOST_WIDE_INT)0x400
						   | (low_bits & 0x3ff)))));
	}
    }
}

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
			       unsigned HOST_WIDE_INT high_bits,
			       unsigned HOST_WIDE_INT low_immediate,
			       int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
			  gen_rtx_ASHIFT (DImode, temp2,
					  GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
			    gen_safe_OR64 (op0, low_immediate)));
}

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
					    unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
				unsigned HOST_WIDE_INT high_bits,
				unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				sub_temp,
				gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
	sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, temp3,
				  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp3)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_PLUS (DImode, temp4, temp2)));
	}
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
	 painful.  However we do still make an attempt to
	 avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low1)));
	  sub_temp = op0;
	  to_shift = 12;
	}
      else
	{
	  to_shift += 12;
	}
      if (low2 != const0_rtx)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_ASHIFT (DImode, sub_temp,
						  GEN_INT (to_shift))));
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_IOR (DImode, op0, low2)));
	  sub_temp = op0;
	  to_shift = 8;
	}
      else
	{
	  to_shift += 8;
	}
      emit_insn (gen_rtx_SET (VOIDmode, op0,
			      gen_rtx_ASHIFT (DImode, sub_temp,
					      GEN_INT (to_shift))));
      if (low3 != const0_rtx)
	emit_insn (gen_rtx_SET (VOIDmode, op0,
				gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
				    unsigned HOST_WIDE_INT,
				    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
			unsigned HOST_WIDE_INT low_bits,
			int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
	  && ((low_bits >> i) & 1))
	lowest_bit_set = i;
      if ((highest_bit_set == -1)
	  && ((high_bits >> (32 - i - 1)) & 1))
	highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
	 && ((highest_bit_set == -1)
	     || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
	{
	  if ((lowest_bit_set == -1)
	      && ((high_bits >> i) & 1))
	    lowest_bit_set = i + 32;
	  if ((highest_bit_set == -1)
	      && ((low_bits >> (32 - i - 1)) & 1))
	    highest_bit_set = 32 - i - 1;
	}
      while (++i < 32
	     && ((highest_bit_set == -1)
		 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
	{
	  if ((low_bits & (1 << i)) != 0)
	    continue;
	}
      else
	{
	  if ((high_bits & (1 << (i - 32))) != 0)
	    continue;
	}
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
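
/* E.g. for the constant 0x0000000ffffc0000 (high_bits 0xf, low_bits
   0xfffc0000) this computes lowest_bit_set = 18, highest_bit_set = 35
   and all_bits_between_are_set = 1; a span that narrow (< 21) is what
   the sethi+shift trick below keys on.  */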

static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
		   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
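
/* The three 2-insn shapes recognized above correspond to the emit paths
   in sparc_emit_set_const64: high word 0 or all-ones gets sethi/or (or
   sethi/xor) on the low word; a solid run of bits touching bit 0 or
   bit 63 gets "mov -1" (or a small constant) plus one shift; a narrow
   cluster of set bits gets a sethi of the cluster plus one shift.  */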

static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
							 unsigned HOST_WIDE_INT,
							 int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
			  unsigned HOST_WIDE_INT low_bits,
			  int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
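
/* In words: slide the whole 64-bit constant right so that its lowest set
   bit lands at position SHIFT.  Continuing the example above,
   create_simple_focus_bits (0xf, 0xfffc0000, 18, 10) yields 0x0ffffc00,
   the 18-bit run re-based at bit 10, ready for a sethi.  */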

/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
	      && (GET_CODE (op0) == SUBREG
		  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits	bits 0  --> 31
     high_bits	bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
			  &highest_bit_set, &lowest_bit_set,
			  &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov	-1, %reg
   *    sllx	%reg, shift, %reg
   * 2) mov	-1, %reg
   *    srlx	%reg, shift, %reg
   * 3) mov	some_small_const, %reg
   *    sllx	%reg, shift, %reg
   */
  if (((highest_bit_set == 63
	|| lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
	   && lowest_bit_set != 0)
	  || all_bits_between_are_set == 0)
	{
	  the_const =
	    create_simple_focus_bits (high_bits, low_bits,
				      lowest_bit_set, 0);
	}
      else if (lowest_bit_set == 0)
	shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode,
						temp,
						GEN_INT (shift))));
      else if (shift < 0)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode,
						  temp,
						  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or fewer bits set somewhere.
   * 1) sethi	%hi(focus_bits), %reg
   *    sllx	%reg, shift, %reg
   * 2) sethi	%hi(focus_bits), %reg
   *    srlx	%reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_LSHIFTRT (DImode, temp,
						  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
	emit_insn (gen_rtx_SET (VOIDmode,
				op0,
				gen_rtx_ASHIFT (DImode, temp,
						GEN_INT (lowest_bit_set - 10))));
      return;
    }

  /* 1) sethi	%hi(low_bits), %reg
   *    or	%reg, %lo(low_bits), %reg
   * 2) sethi	%hi(~low_bits), %reg
   *    xor	%reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
				     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
			 (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
	 non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
	   && ((~low_bits) & 0x80000000) == 0)
	  || (((~high_bits) & 0xffffffff) == 0xffffffff
	      && ((~low_bits) & 0x80000000) != 0))
	{
	  unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

	  if ((SPARC_SETHI_P (fast_int)
	       && (~high_bits & 0xffffffff) == 0)
	      || SPARC_SIMM13_P (fast_int))
	    emit_insn (gen_safe_SET64 (temp, fast_int));
	  else
	    sparc_emit_set_const64 (temp, GEN_INT (fast_int));
	}
      else
	{
	  rtx negated_const;
	  negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
				   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
	  sparc_emit_set_const64 (temp, negated_const);
	}

      /* If we are XOR'ing with -1, then we should emit a one's complement
	 instead.  This way the combiner will notice logical operations
	 such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, op0,
				  gen_rtx_NOT (DImode, temp)));
	}
      else
	{
	  emit_insn (gen_rtx_SET (VOIDmode,
				  op0,
				  gen_safe_XOR64 (temp,
						  (-0x400 | trailing_bits))));
	}
      return;
    }

  /* 1) sethi	%hi(xxx), %reg
   *    or	%reg, %lo(xxx), %reg
   *    sllx	%reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits == 0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
	create_simple_focus_bits (high_bits, low_bits,
				  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
	 middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
				     focus_bits, 0,
				     lowest_bit_set);
      return;
    }

  /* 1) sethi	%hi(high_bits), %reg
   *    or	%reg, %lo(high_bits), %reg
   *    sllx	%reg, 32, %reg
   *    or	%reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P (low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
	  high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
1891
1892 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1893 return the mode to be used for the comparison. For floating-point,
1894 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1895 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1896 processing is needed. */
1897
1898 enum machine_mode
1899 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1900 {
1901 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1902 {
1903 switch (op)
1904 {
1905 case EQ:
1906 case NE:
1907 case UNORDERED:
1908 case ORDERED:
1909 case UNLT:
1910 case UNLE:
1911 case UNGT:
1912 case UNGE:
1913 case UNEQ:
1914 case LTGT:
1915 return CCFPmode;
1916
1917 case LT:
1918 case LE:
1919 case GT:
1920 case GE:
1921 return CCFPEmode;
1922
1923 default:
1924 gcc_unreachable ();
1925 }
1926 }
1927 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1928 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1929 {
1930 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1931 return CCX_NOOVmode;
1932 else
1933 return CC_NOOVmode;
1934 }
1935 else
1936 {
1937 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1938 return CCXmode;
1939 else
1940 return CCmode;
1941 }
1942 }
1943
1944 /* X and Y are two things to compare using CODE. Emit the compare insn and
1945 return the rtx for the cc reg in the proper mode. */
1946
1947 rtx
1948 gen_compare_reg (enum rtx_code code)
1949 {
1950 rtx x = sparc_compare_op0;
1951 rtx y = sparc_compare_op1;
1952 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1953 rtx cc_reg;
1954
1955 if (sparc_compare_emitted != NULL_RTX)
1956 {
1957 cc_reg = sparc_compare_emitted;
1958 sparc_compare_emitted = NULL_RTX;
1959 return cc_reg;
1960 }
1961
1962 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1963 fcc regs (cse can't tell they're really call clobbered regs and will
1964 remove a duplicate comparison even if there is an intervening function
1965 call - it will then try to reload the cc reg via an int reg which is why
1966 we need the movcc patterns). It is possible to provide the movcc
1967 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
1968 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
1969 to tell cse that CCFPE mode registers (even pseudos) are call
1970 clobbered. */
1971
1972 /* ??? This is an experiment. Rather than making changes to cse which may
1973 or may not be easy/clean, we do our own cse. This is possible because
1974 we will generate hard registers. Cse knows they're call clobbered (it
1975 doesn't know the same thing about pseudos). If we guess wrong, no big
1976 deal, but if we win, great! */
1977
1978 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1979 #if 1 /* experiment */
1980 {
1981 int reg;
1982 /* We cycle through the registers to ensure they're all exercised. */
1983 static int next_fcc_reg = 0;
1984 /* Previous x,y for each fcc reg. */
1985 static rtx prev_args[4][2];
1986
1987 /* Scan prev_args for x,y. */
1988 for (reg = 0; reg < 4; reg++)
1989 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1990 break;
1991 if (reg == 4)
1992 {
1993 reg = next_fcc_reg;
1994 prev_args[reg][0] = x;
1995 prev_args[reg][1] = y;
1996 next_fcc_reg = (next_fcc_reg + 1) & 3;
1997 }
1998 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
1999 }
2000 #else
2001 cc_reg = gen_reg_rtx (mode);
2002 #endif /* ! experiment */
2003 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2004 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2005 else
2006 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2007
2008 emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2009 gen_rtx_COMPARE (mode, x, y)));
2010
2011 return cc_reg;
2012 }
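/* E.g. two successive FP comparisons of the same X and Y will be handed
   the same %fcc register by the table above, so cse can delete the
   second compare even though an intervening call would invalidate a
   pseudo-based cc reg (a sketch of the intent, not a guarantee).  */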
2013
2014 /* This function is used for v9 only.
2015 CODE is the code for an Scc's comparison.
2016 OPERANDS[0] is the target of the Scc insn.
2017 OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2018 been generated yet).
2019
2020 This function is needed to turn
2021
2022 (set (reg:SI 110)
2023 (gt (reg:CCX 100 %icc)
2024 (const_int 0)))
2025 into
2026 (set (reg:SI 110)
2027 (gt:DI (reg:CCX 100 %icc)
2028 (const_int 0)))
2029
2030 I.e. the instruction recognizer needs to see the mode of the comparison to
2031 find the right instruction. We could use "gt:DI" right in the
2032 define_expand, but leaving it out allows us to handle DI, SI, etc.
2033
2034 We refer to the global sparc compare operands sparc_compare_op0 and
2035 sparc_compare_op1. */
2036
2037 int
2038 gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2039 {
2040 if (! TARGET_ARCH64
2041 && (GET_MODE (sparc_compare_op0) == DImode
2042 || GET_MODE (operands[0]) == DImode))
2043 return 0;
2044
2045 /* Try to use the movrCC insns. */
2046 if (TARGET_ARCH64
2047 && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2048 && sparc_compare_op1 == const0_rtx
2049 && v9_regcmp_p (compare_code))
2050 {
2051 rtx op0 = sparc_compare_op0;
2052 rtx temp;
2053
2054 /* Special case for op0 != 0. This can be done with one instruction if
2055 operands[0] == sparc_compare_op0. */
2056
2057 if (compare_code == NE
2058 && GET_MODE (operands[0]) == DImode
2059 && rtx_equal_p (op0, operands[0]))
2060 {
2061 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2062 gen_rtx_IF_THEN_ELSE (DImode,
2063 gen_rtx_fmt_ee (compare_code, DImode,
2064 op0, const0_rtx),
2065 const1_rtx,
2066 operands[0])));
2067 return 1;
2068 }
2069
2070 if (reg_overlap_mentioned_p (operands[0], op0))
2071 {
2072 /* Handle the case where operands[0] == sparc_compare_op0.
2073 We "early clobber" the result. */
2074 op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2075 emit_move_insn (op0, sparc_compare_op0);
2076 }
2077
2078 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2079 if (GET_MODE (op0) != DImode)
2080 {
2081 temp = gen_reg_rtx (DImode);
2082 convert_move (temp, op0, 0);
2083 }
2084 else
2085 temp = op0;
2086 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2087 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2088 gen_rtx_fmt_ee (compare_code, DImode,
2089 temp, const0_rtx),
2090 const1_rtx,
2091 operands[0])));
2092 return 1;
2093 }
2094 else
2095 {
2096 operands[1] = gen_compare_reg (compare_code);
2097
2098 switch (GET_MODE (operands[1]))
2099 {
2100 case CCmode :
2101 case CCXmode :
2102 case CCFPEmode :
2103 case CCFPmode :
2104 break;
2105 default :
2106 gcc_unreachable ();
2107 }
2108 emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2109 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2110 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2111 gen_rtx_fmt_ee (compare_code,
2112 GET_MODE (operands[1]),
2113 operands[1], const0_rtx),
2114 const1_rtx, operands[0])));
2115 return 1;
2116 }
2117 }
2118
2119 /* Emit a conditional jump insn for the v9 architecture using comparison code
2120 CODE and jump target LABEL.
2121 This function exists to take advantage of the v9 brxx insns. */
2122
2123 void
2124 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2125 {
2126 gcc_assert (sparc_compare_emitted == NULL_RTX);
2127 emit_jump_insn (gen_rtx_SET (VOIDmode,
2128 pc_rtx,
2129 gen_rtx_IF_THEN_ELSE (VOIDmode,
2130 gen_rtx_fmt_ee (code, GET_MODE (op0),
2131 op0, const0_rtx),
2132 gen_rtx_LABEL_REF (VOIDmode, label),
2133 pc_rtx)));
2134 }
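/* E.g. for CODE == NE this ultimately becomes a branch-on-register
   instruction of the "brnz %o0, label" family (illustrative; the exact
   mnemonic and prediction bits come from the insn patterns).  */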
2135
2136 /* Generate a DFmode part of a hard TFmode register.
2137 REG is the TFmode hard register, LOW is 1 for the
2138 low 64 bits of the register and 0 otherwise.
2139 */
2140 rtx
2141 gen_df_reg (rtx reg, int low)
2142 {
2143 int regno = REGNO (reg);
2144
2145 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2146 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2147 return gen_rtx_REG (DFmode, regno);
2148 }
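/* For example, on big-endian 64-bit SPARC a TFmode value living in %f4
   (hard register 36, assuming FP registers start at 32 in this file's
   numbering) yields %f4 for LOW == 0 and %f6 for LOW == 1.  */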
2149 \f
2150 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2151 Unlike normal calls, TFmode operands are passed by reference. It is
2152 assumed that no more than 3 operands are required. */
2153
2154 static void
2155 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2156 {
2157 rtx ret_slot = NULL, arg[3], func_sym;
2158 int i;
2159
2160 /* We only expect to be called for conversions, unary, and binary ops. */
2161 gcc_assert (nargs == 2 || nargs == 3);
2162
2163 for (i = 0; i < nargs; ++i)
2164 {
2165 rtx this_arg = operands[i];
2166 rtx this_slot;
2167
2168 /* TFmode arguments and return values are passed by reference. */
2169 if (GET_MODE (this_arg) == TFmode)
2170 {
2171 int force_stack_temp;
2172
2173 force_stack_temp = 0;
2174 if (TARGET_BUGGY_QP_LIB && i == 0)
2175 force_stack_temp = 1;
2176
2177 if (GET_CODE (this_arg) == MEM
2178 && ! force_stack_temp)
2179 this_arg = XEXP (this_arg, 0);
2180 else if (CONSTANT_P (this_arg)
2181 && ! force_stack_temp)
2182 {
2183 this_slot = force_const_mem (TFmode, this_arg);
2184 this_arg = XEXP (this_slot, 0);
2185 }
2186 else
2187 {
2188 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2189
2190 /* Operand 0 is the return value. We'll copy it out later. */
2191 if (i > 0)
2192 emit_move_insn (this_slot, this_arg);
2193 else
2194 ret_slot = this_slot;
2195
2196 this_arg = XEXP (this_slot, 0);
2197 }
2198 }
2199
2200 arg[i] = this_arg;
2201 }
2202
2203 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2204
2205 if (GET_MODE (operands[0]) == TFmode)
2206 {
2207 if (nargs == 2)
2208 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2209 arg[0], GET_MODE (arg[0]),
2210 arg[1], GET_MODE (arg[1]));
2211 else
2212 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2213 arg[0], GET_MODE (arg[0]),
2214 arg[1], GET_MODE (arg[1]),
2215 arg[2], GET_MODE (arg[2]));
2216
2217 if (ret_slot)
2218 emit_move_insn (operands[0], ret_slot);
2219 }
2220 else
2221 {
2222 rtx ret;
2223
2224 gcc_assert (nargs == 2);
2225
2226 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2227 GET_MODE (operands[0]), 1,
2228 arg[1], GET_MODE (arg[1]));
2229
2230 if (ret != operands[0])
2231 emit_move_insn (operands[0], ret);
2232 }
2233 }
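/* The Sun quad emulation routines used below are assumed to have
   pointer-based prototypes along the lines of

       void _Qp_add (long double *c, const long double *a,
                     const long double *b);

   which is why TFmode arguments and results are passed by reference
   above (a sketch of the ABI shape, not a declaration used here).  */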
2234
2235 /* Expand soft-float TFmode calls to sparc abi routines. */
2236
2237 static void
2238 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2239 {
2240 const char *func;
2241
2242 switch (code)
2243 {
2244 case PLUS:
2245 func = "_Qp_add";
2246 break;
2247 case MINUS:
2248 func = "_Qp_sub";
2249 break;
2250 case MULT:
2251 func = "_Qp_mul";
2252 break;
2253 case DIV:
2254 func = "_Qp_div";
2255 break;
2256 default:
2257 gcc_unreachable ();
2258 }
2259
2260 emit_soft_tfmode_libcall (func, 3, operands);
2261 }
2262
2263 static void
2264 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2265 {
2266 const char *func;
2267
2268 gcc_assert (code == SQRT);
2269 func = "_Qp_sqrt";
2270
2271 emit_soft_tfmode_libcall (func, 2, operands);
2272 }
2273
2274 static void
2275 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2276 {
2277 const char *func;
2278
2279 switch (code)
2280 {
2281 case FLOAT_EXTEND:
2282 switch (GET_MODE (operands[1]))
2283 {
2284 case SFmode:
2285 func = "_Qp_stoq";
2286 break;
2287 case DFmode:
2288 func = "_Qp_dtoq";
2289 break;
2290 default:
2291 gcc_unreachable ();
2292 }
2293 break;
2294
2295 case FLOAT_TRUNCATE:
2296 switch (GET_MODE (operands[0]))
2297 {
2298 case SFmode:
2299 func = "_Qp_qtos";
2300 break;
2301 case DFmode:
2302 func = "_Qp_qtod";
2303 break;
2304 default:
2305 gcc_unreachable ();
2306 }
2307 break;
2308
2309 case FLOAT:
2310 switch (GET_MODE (operands[1]))
2311 {
2312 case SImode:
2313 func = "_Qp_itoq";
2314 break;
2315 case DImode:
2316 func = "_Qp_xtoq";
2317 break;
2318 default:
2319 gcc_unreachable ();
2320 }
2321 break;
2322
2323 case UNSIGNED_FLOAT:
2324 switch (GET_MODE (operands[1]))
2325 {
2326 case SImode:
2327 func = "_Qp_uitoq";
2328 break;
2329 case DImode:
2330 func = "_Qp_uxtoq";
2331 break;
2332 default:
2333 gcc_unreachable ();
2334 }
2335 break;
2336
2337 case FIX:
2338 switch (GET_MODE (operands[0]))
2339 {
2340 case SImode:
2341 func = "_Qp_qtoi";
2342 break;
2343 case DImode:
2344 func = "_Qp_qtox";
2345 break;
2346 default:
2347 gcc_unreachable ();
2348 }
2349 break;
2350
2351 case UNSIGNED_FIX:
2352 switch (GET_MODE (operands[0]))
2353 {
2354 case SImode:
2355 func = "_Qp_qtoui";
2356 break;
2357 case DImode:
2358 func = "_Qp_qtoux";
2359 break;
2360 default:
2361 gcc_unreachable ();
2362 }
2363 break;
2364
2365 default:
2366 gcc_unreachable ();
2367 }
2368
2369 emit_soft_tfmode_libcall (func, 2, operands);
2370 }
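/* The helper names follow a _Qp_<src>to<dst> scheme: s, d and q denote
   single, double and quad floats, while i, x, ui and ux denote 32-bit
   and 64-bit signed and unsigned integers; e.g. _Qp_dtoq extends a
   double to a quad.  */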
2371
2372 /* Expand a hard-float tfmode operation. All arguments must be in
2373 registers. */
2374
2375 static void
2376 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2377 {
2378 rtx op, dest;
2379
2380 if (GET_RTX_CLASS (code) == RTX_UNARY)
2381 {
2382 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2383 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2384 }
2385 else
2386 {
2387 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2388 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2389 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2390 operands[1], operands[2]);
2391 }
2392
2393 if (register_operand (operands[0], VOIDmode))
2394 dest = operands[0];
2395 else
2396 dest = gen_reg_rtx (GET_MODE (operands[0]));
2397
2398 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2399
2400 if (dest != operands[0])
2401 emit_move_insn (operands[0], dest);
2402 }
2403
2404 void
2405 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2406 {
2407 if (TARGET_HARD_QUAD)
2408 emit_hard_tfmode_operation (code, operands);
2409 else
2410 emit_soft_tfmode_binop (code, operands);
2411 }
2412
2413 void
2414 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2415 {
2416 if (TARGET_HARD_QUAD)
2417 emit_hard_tfmode_operation (code, operands);
2418 else
2419 emit_soft_tfmode_unop (code, operands);
2420 }
2421
2422 void
2423 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2424 {
2425 if (TARGET_HARD_QUAD)
2426 emit_hard_tfmode_operation (code, operands);
2427 else
2428 emit_soft_tfmode_cvt (code, operands);
2429 }
2430 \f
2431 /* Return nonzero if a branch/jump/call instruction will be emitting
2432 a nop into its delay slot. */
2433
2434 int
2435 empty_delay_slot (rtx insn)
2436 {
2437 rtx seq;
2438
2439 /* If no previous instruction (should not happen), return true. */
2440 if (PREV_INSN (insn) == NULL)
2441 return 1;
2442
2443 seq = NEXT_INSN (PREV_INSN (insn));
2444 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2445 return 0;
2446
2447 return 1;
2448 }
2449
2450 /* Return nonzero if TRIAL can go into the call delay slot. */
2451
2452 int
2453 tls_call_delay (rtx trial)
2454 {
2455 rtx pat;
2456
2457 /* Binutils allows
2458 call __tls_get_addr, %tgd_call (foo)
2459 add %l7, %o0, %o0, %tgd_add (foo)
2460 while Sun as/ld does not. */
2461 if (TARGET_GNU_TLS || !TARGET_TLS)
2462 return 1;
2463
2464 pat = PATTERN (trial);
2465
2466 /* We must reject tgd_add{32|64}, i.e.
2467 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2468 and tldm_add{32|64}, i.e.
2469 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2470 for Sun as/ld. */
2471 if (GET_CODE (pat) == SET
2472 && GET_CODE (SET_SRC (pat)) == PLUS)
2473 {
2474 rtx unspec = XEXP (SET_SRC (pat), 1);
2475
2476 if (GET_CODE (unspec) == UNSPEC
2477 && (XINT (unspec, 1) == UNSPEC_TLSGD
2478 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2479 return 0;
2480 }
2481
2482 return 1;
2483 }
2484
2485 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2486 instruction. RETURN_P is true if the v9 variant 'return' is to be
2487 considered in the test too.
2488
2489 TRIAL must be a SET whose destination is a REG appropriate for the
2490 'restore' instruction or, if RETURN_P is true, for the 'return'
2491 instruction. */
2492
2493 static int
2494 eligible_for_restore_insn (rtx trial, bool return_p)
2495 {
2496 rtx pat = PATTERN (trial);
2497 rtx src = SET_SRC (pat);
2498
2499 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2500 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2501 && arith_operand (src, GET_MODE (src)))
2502 {
2503 if (TARGET_ARCH64)
2504 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2505 else
2506 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2507 }
2508
2509 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2510 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2511 && arith_double_operand (src, GET_MODE (src)))
2512 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2513
2514 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2515 else if (! TARGET_FPU && register_operand (src, SFmode))
2516 return 1;
2517
2518 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2519 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2520 return 1;
2521
2522 /* If we have the 'return' instruction, anything that does not use
2523 local or output registers and can go into a delay slot wins. */
2524 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2525 && (get_attr_in_uncond_branch_delay (trial)
2526 == IN_UNCOND_BRANCH_DELAY_TRUE))
2527 return 1;
2528
2529 /* The 'restore src1,src2,dest' pattern for SImode. */
2530 else if (GET_CODE (src) == PLUS
2531 && register_operand (XEXP (src, 0), SImode)
2532 && arith_operand (XEXP (src, 1), SImode))
2533 return 1;
2534
2535 /* The 'restore src1,src2,dest' pattern for DImode. */
2536 else if (GET_CODE (src) == PLUS
2537 && register_operand (XEXP (src, 0), DImode)
2538 && arith_double_operand (XEXP (src, 1), DImode))
2539 return 1;
2540
2541 /* The 'restore src1,%lo(src2),dest' pattern. */
2542 else if (GET_CODE (src) == LO_SUM
2543 && ! TARGET_CM_MEDMID
2544 && ((register_operand (XEXP (src, 0), SImode)
2545 && immediate_operand (XEXP (src, 1), SImode))
2546 || (TARGET_ARCH64
2547 && register_operand (XEXP (src, 0), DImode)
2548 && immediate_operand (XEXP (src, 1), DImode))))
2549 return 1;
2550
2551 /* The 'restore src,src,dest' pattern. */
2552 else if (GET_CODE (src) == ASHIFT
2553 && (register_operand (XEXP (src, 0), SImode)
2554 || register_operand (XEXP (src, 0), DImode))
2555 && XEXP (src, 1) == const1_rtx)
2556 return 1;
2557
2558 return 0;
2559 }
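/* For example, a final insn like
       (set (reg:SI %i0) (plus:SI (reg:SI %o2) (reg:SI %o3)))
   matches the SImode 'restore src1,src2,dest' case above and can be
   combined into "restore %o2, %o3, %i0" (an illustrative sketch).  */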
2560
2561 /* Return nonzero if TRIAL can go into the function return's
2562 delay slot. */
2563
2564 int
2565 eligible_for_return_delay (rtx trial)
2566 {
2567 rtx pat;
2568
2569 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2570 return 0;
2571
2572 if (get_attr_length (trial) != 1)
2573 return 0;
2574
2575 /* If there are any call-saved registers, we should scan TRIAL to check
2576 that it does not reference them. For now just make it easy. */
2577 if (num_gfregs)
2578 return 0;
2579
2580 /* If the function uses __builtin_eh_return, the eh_return machinery
2581 occupies the delay slot. */
2582 if (current_function_calls_eh_return)
2583 return 0;
2584
2585 /* In the case of a true leaf function, anything can go into the slot. */
2586 if (sparc_leaf_function_p)
2587 return get_attr_in_uncond_branch_delay (trial)
2588 == IN_UNCOND_BRANCH_DELAY_TRUE;
2589
2590 pat = PATTERN (trial);
2591
2592 /* Otherwise, only operations which can be done in tandem with
2593 a `restore' or `return' insn can go into the delay slot. */
2594 if (GET_CODE (SET_DEST (pat)) != REG
2595 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2596 return 0;
2597
2598 /* If this instruction sets up a floating point register and we have a return
2599 instruction, it can probably go in. But restore will not work
2600 with FP_REGS. */
2601 if (REGNO (SET_DEST (pat)) >= 32)
2602 return (TARGET_V9
2603 && ! epilogue_renumber (&pat, 1)
2604 && (get_attr_in_uncond_branch_delay (trial)
2605 == IN_UNCOND_BRANCH_DELAY_TRUE));
2606
2607 return eligible_for_restore_insn (trial, true);
2608 }
2609
2610 /* Return nonzero if TRIAL can go into the sibling call's
2611 delay slot. */
2612
2613 int
2614 eligible_for_sibcall_delay (rtx trial)
2615 {
2616 rtx pat;
2617
2618 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2619 return 0;
2620
2621 if (get_attr_length (trial) != 1)
2622 return 0;
2623
2624 pat = PATTERN (trial);
2625
2626 if (sparc_leaf_function_p)
2627 {
2628 /* If the tail call is done using the call instruction,
2629 we have to restore %o7 in the delay slot. */
2630 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2631 return 0;
2632
2633 /* %g1 is used to build the function address.  */
2634 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2635 return 0;
2636
2637 return 1;
2638 }
2639
2640 /* Otherwise, only operations which can be done in tandem with
2641 a `restore' insn can go into the delay slot. */
2642 if (GET_CODE (SET_DEST (pat)) != REG
2643 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2644 || REGNO (SET_DEST (pat)) >= 32)
2645 return 0;
2646
2647 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2648 in most cases. */
2649 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2650 return 0;
2651
2652 return eligible_for_restore_insn (trial, false);
2653 }
2654
2655 int
2656 short_branch (int uid1, int uid2)
2657 {
2658 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2659
2660 /* Leave a few words of "slop". */
2661 if (delta >= -1023 && delta <= 1022)
2662 return 1;
2663
2664 return 0;
2665 }
2666
2667 /* Return nonzero if REG is not used after INSN.
2668 We assume REG is a reload reg, and therefore does
2669 not live past labels or calls or jumps. */
2670 int
2671 reg_unused_after (rtx reg, rtx insn)
2672 {
2673 enum rtx_code code, prev_code = UNKNOWN;
2674
2675 while ((insn = NEXT_INSN (insn)))
2676 {
2677 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2678 return 1;
2679
2680 code = GET_CODE (insn);
2681 if (GET_CODE (insn) == CODE_LABEL)
2682 return 1;
2683
2684 if (INSN_P (insn))
2685 {
2686 rtx set = single_set (insn);
2687 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2688 if (set && in_src)
2689 return 0;
2690 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2691 return 1;
2692 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2693 return 0;
2694 }
2695 prev_code = code;
2696 }
2697 return 1;
2698 }
2699 \f
2700 /* Determine if it's legal to put X into the constant pool. This
2701 is not possible if X contains the address of a symbol that is
2702 not constant (TLS) or not known at final link time (PIC). */
2703
2704 static bool
2705 sparc_cannot_force_const_mem (rtx x)
2706 {
2707 switch (GET_CODE (x))
2708 {
2709 case CONST_INT:
2710 case CONST_DOUBLE:
2711 case CONST_VECTOR:
2712 /* Accept all non-symbolic constants. */
2713 return false;
2714
2715 case LABEL_REF:
2716 /* Labels are OK iff we are non-PIC. */
2717 return flag_pic != 0;
2718
2719 case SYMBOL_REF:
2720 /* 'Naked' TLS symbol references are never OK;
2721 non-TLS symbols are OK iff we are non-PIC. */
2722 if (SYMBOL_REF_TLS_MODEL (x))
2723 return true;
2724 else
2725 return flag_pic != 0;
2726
2727 case CONST:
2728 return sparc_cannot_force_const_mem (XEXP (x, 0));
2729 case PLUS:
2730 case MINUS:
2731 return sparc_cannot_force_const_mem (XEXP (x, 0))
2732 || sparc_cannot_force_const_mem (XEXP (x, 1));
2733 case UNSPEC:
2734 return true;
2735 default:
2736 gcc_unreachable ();
2737 }
2738 }
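/* E.g. under -fpic, (const (plus (symbol_ref "x") (const_int 4)))
   cannot be forced into the constant pool because the symbol's address
   is not known until final link time.  */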
2739 \f
2740 /* PIC support. */
2741 static GTY(()) char pic_helper_symbol_name[256];
2742 static GTY(()) rtx pic_helper_symbol;
2743 static GTY(()) bool pic_helper_emitted_p = false;
2744 static GTY(()) rtx global_offset_table;
2745
2746 /* Ensure that we are not using patterns that are not OK with PIC. */
2747
2748 int
2749 check_pic (int i)
2750 {
2751 switch (flag_pic)
2752 {
2753 case 1:
2754 gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2755 && (GET_CODE (recog_data.operand[i]) != CONST
2756 || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2757 && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2758 == global_offset_table)
2759 && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2760 == CONST))));
2761 case 2:
2762 default:
2763 return 1;
2764 }
2765 }
2766
2767 /* Return true if X is an address which needs a temporary register when
2768 reloaded while generating PIC code. */
2769
2770 int
2771 pic_address_needs_scratch (rtx x)
2772 {
2773 /* An address which is a symbolic plus a non SMALL_INT needs a temp reg. */
2774 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2775 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2776 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2777 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2778 return 1;
2779
2780 return 0;
2781 }
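/* For instance, (const (plus (symbol_ref "x") (const_int 0x2000)))
   needs a scratch register: 0x2000 does not fit in the 13-bit signed
   immediate field, so the offset must be built separately.  */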
2782
2783 /* Determine if a given RTX is a valid constant. We already know this
2784 satisfies CONSTANT_P. */
2785
2786 bool
2787 legitimate_constant_p (rtx x)
2788 {
2789 rtx inner;
2790
2791 switch (GET_CODE (x))
2792 {
2793 case SYMBOL_REF:
2794 /* TLS symbols are not constant. */
2795 if (SYMBOL_REF_TLS_MODEL (x))
2796 return false;
2797 break;
2798
2799 case CONST:
2800 inner = XEXP (x, 0);
2801
2802 /* Offsets of TLS symbols are never valid.
2803 Discourage CSE from creating them. */
2804 if (GET_CODE (inner) == PLUS
2805 && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2806 return false;
2807 break;
2808
2809 case CONST_DOUBLE:
2810 if (GET_MODE (x) == VOIDmode)
2811 return true;
2812
2813 /* Floating point constants are generally not ok.
2814 The only exception is 0.0 in VIS. */
2815 if (TARGET_VIS
2816 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2817 && const_zero_operand (x, GET_MODE (x)))
2818 return true;
2819
2820 return false;
2821
2822 case CONST_VECTOR:
2823 /* Vector constants are generally not ok.
2824 The only exception is 0 in VIS. */
2825 if (TARGET_VIS
2826 && const_zero_operand (x, GET_MODE (x)))
2827 return true;
2828
2829 return false;
2830
2831 default:
2832 break;
2833 }
2834
2835 return true;
2836 }
2837
2838 /* Determine if a given RTX is a valid constant address. */
2839
2840 bool
2841 constant_address_p (rtx x)
2842 {
2843 switch (GET_CODE (x))
2844 {
2845 case LABEL_REF:
2846 case CONST_INT:
2847 case HIGH:
2848 return true;
2849
2850 case CONST:
2851 if (flag_pic && pic_address_needs_scratch (x))
2852 return false;
2853 return legitimate_constant_p (x);
2854
2855 case SYMBOL_REF:
2856 return !flag_pic && legitimate_constant_p (x);
2857
2858 default:
2859 return false;
2860 }
2861 }
2862
2863 /* Nonzero if the constant value X is a legitimate general operand
2864 when generating PIC code. It is given that flag_pic is on and
2865 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
2866
2867 bool
2868 legitimate_pic_operand_p (rtx x)
2869 {
2870 if (pic_address_needs_scratch (x))
2871 return false;
2872 if (SPARC_SYMBOL_REF_TLS_P (x)
2873 || (GET_CODE (x) == CONST
2874 && GET_CODE (XEXP (x, 0)) == PLUS
2875 && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2876 return false;
2877 return true;
2878 }
2879
2880 /* Return nonzero if ADDR is a valid memory address.
2881 STRICT specifies whether strict register checking applies. */
2882
2883 int
2884 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2885 {
2886 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2887
2888 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2889 rs1 = addr;
2890 else if (GET_CODE (addr) == PLUS)
2891 {
2892 rs1 = XEXP (addr, 0);
2893 rs2 = XEXP (addr, 1);
2894
2895 /* Canonicalize. REG comes first; if there are no regs,
2896 LO_SUM comes first. */
2897 if (!REG_P (rs1)
2898 && GET_CODE (rs1) != SUBREG
2899 && (REG_P (rs2)
2900 || GET_CODE (rs2) == SUBREG
2901 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2902 {
2903 rs1 = XEXP (addr, 1);
2904 rs2 = XEXP (addr, 0);
2905 }
2906
2907 if ((flag_pic == 1
2908 && rs1 == pic_offset_table_rtx
2909 && !REG_P (rs2)
2910 && GET_CODE (rs2) != SUBREG
2911 && GET_CODE (rs2) != LO_SUM
2912 && GET_CODE (rs2) != MEM
2913 && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2914 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2915 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2916 || ((REG_P (rs1)
2917 || GET_CODE (rs1) == SUBREG)
2918 && RTX_OK_FOR_OFFSET_P (rs2)))
2919 {
2920 imm1 = rs2;
2921 rs2 = NULL;
2922 }
2923 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2924 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2925 {
2926 /* We prohibit REG + REG for TFmode when there are no quad move insns
2927 and we consequently need to split. We do this because REG+REG
2928 is not an offsettable address. If we get the situation in reload
2929 where source and destination of a movtf pattern are both MEMs with
2930 REG+REG address, then only one of them gets converted to an
2931 offsettable address. */
2932 if (mode == TFmode
2933 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2934 return 0;
2935
2936 /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2937 optimizing, because then mem_min_alignment is likely to be zero
2938 after reload and the forced split would lack a matching splitter
2939 pattern. */
2940 if (TARGET_ARCH32 && !optimize
2941 && (mode == DFmode || mode == DImode))
2942 return 0;
2943 }
2944 else if (USE_AS_OFFSETABLE_LO10
2945 && GET_CODE (rs1) == LO_SUM
2946 && TARGET_ARCH64
2947 && ! TARGET_CM_MEDMID
2948 && RTX_OK_FOR_OLO10_P (rs2))
2949 {
2950 rs2 = NULL;
2951 imm1 = XEXP (rs1, 1);
2952 rs1 = XEXP (rs1, 0);
2953 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2954 return 0;
2955 }
2956 }
2957 else if (GET_CODE (addr) == LO_SUM)
2958 {
2959 rs1 = XEXP (addr, 0);
2960 imm1 = XEXP (addr, 1);
2961
2962 if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2963 return 0;
2964
2965 /* We can't allow TFmode in 32-bit mode, because an offset greater
2966 than the alignment (8) may cause the LO_SUM to overflow. */
2967 if (mode == TFmode && TARGET_ARCH32)
2968 return 0;
2969 }
2970 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2971 return 1;
2972 else
2973 return 0;
2974
2975 if (GET_CODE (rs1) == SUBREG)
2976 rs1 = SUBREG_REG (rs1);
2977 if (!REG_P (rs1))
2978 return 0;
2979
2980 if (rs2)
2981 {
2982 if (GET_CODE (rs2) == SUBREG)
2983 rs2 = SUBREG_REG (rs2);
2984 if (!REG_P (rs2))
2985 return 0;
2986 }
2987
2988 if (strict)
2989 {
2990 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2991 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2992 return 0;
2993 }
2994 else
2995 {
2996 if ((REGNO (rs1) >= 32
2997 && REGNO (rs1) != FRAME_POINTER_REGNUM
2998 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2999 || (rs2
3000 && (REGNO (rs2) >= 32
3001 && REGNO (rs2) != FRAME_POINTER_REGNUM
3002 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3003 return 0;
3004 }
3005 return 1;
3006 }
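/* Typical addresses accepted above (illustrative): "[%l0]",
   "[%l0 + %l1]", "[%l0 + 64]" with a 13-bit signed immediate, and the
   LO_SUM form "[%l0 + %lo(sym)]" that completes a sethi/or pair.  */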
3007
3008 /* Construct the SYMBOL_REF for the __tls_get_addr function. */
3009
3010 static GTY(()) rtx sparc_tls_symbol;
3011
3012 static rtx
3013 sparc_tls_get_addr (void)
3014 {
3015 if (!sparc_tls_symbol)
3016 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3017
3018 return sparc_tls_symbol;
3019 }
3020
3021 static rtx
3022 sparc_tls_got (void)
3023 {
3024 rtx temp;
3025 if (flag_pic)
3026 {
3027 current_function_uses_pic_offset_table = 1;
3028 return pic_offset_table_rtx;
3029 }
3030
3031 if (!global_offset_table)
3032 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3033 temp = gen_reg_rtx (Pmode);
3034 emit_move_insn (temp, global_offset_table);
3035 return temp;
3036 }
3037
3038 /* Return 1 if *X is a thread-local symbol. */
3039
3040 static int
3041 sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3042 {
3043 return SPARC_SYMBOL_REF_TLS_P (*x);
3044 }
3045
3046 /* Return 1 if X contains a thread-local symbol. */
3047
3048 bool
3049 sparc_tls_referenced_p (rtx x)
3050 {
3051 if (!TARGET_HAVE_TLS)
3052 return false;
3053
3054 return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3055 }
3056
3057 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3058 this (thread-local) address. */
3059
3060 rtx
3061 legitimize_tls_address (rtx addr)
3062 {
3063 rtx temp1, temp2, temp3, ret, o0, got, insn;
3064
3065 gcc_assert (! no_new_pseudos);
3066
3067 if (GET_CODE (addr) == SYMBOL_REF)
3068 switch (SYMBOL_REF_TLS_MODEL (addr))
3069 {
3070 case TLS_MODEL_GLOBAL_DYNAMIC:
3071 start_sequence ();
3072 temp1 = gen_reg_rtx (SImode);
3073 temp2 = gen_reg_rtx (SImode);
3074 ret = gen_reg_rtx (Pmode);
3075 o0 = gen_rtx_REG (Pmode, 8);
3076 got = sparc_tls_got ();
3077 emit_insn (gen_tgd_hi22 (temp1, addr));
3078 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3079 if (TARGET_ARCH32)
3080 {
3081 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3082 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3083 addr, const1_rtx));
3084 }
3085 else
3086 {
3087 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3088 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3089 addr, const1_rtx));
3090 }
3091 CALL_INSN_FUNCTION_USAGE (insn)
3092 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3093 CALL_INSN_FUNCTION_USAGE (insn));
3094 insn = get_insns ();
3095 end_sequence ();
3096 emit_libcall_block (insn, ret, o0, addr);
3097 break;
3098
3099 case TLS_MODEL_LOCAL_DYNAMIC:
3100 start_sequence ();
3101 temp1 = gen_reg_rtx (SImode);
3102 temp2 = gen_reg_rtx (SImode);
3103 temp3 = gen_reg_rtx (Pmode);
3104 ret = gen_reg_rtx (Pmode);
3105 o0 = gen_rtx_REG (Pmode, 8);
3106 got = sparc_tls_got ();
3107 emit_insn (gen_tldm_hi22 (temp1));
3108 emit_insn (gen_tldm_lo10 (temp2, temp1));
3109 if (TARGET_ARCH32)
3110 {
3111 emit_insn (gen_tldm_add32 (o0, got, temp2));
3112 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3113 const1_rtx));
3114 }
3115 else
3116 {
3117 emit_insn (gen_tldm_add64 (o0, got, temp2));
3118 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3119 const1_rtx));
3120 }
3121 CALL_INSN_FUNCTION_USAGE (insn)
3122 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3123 CALL_INSN_FUNCTION_USAGE (insn));
3124 insn = get_insns ();
3125 end_sequence ();
3126 emit_libcall_block (insn, temp3, o0,
3127 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3128 UNSPEC_TLSLD_BASE));
3129 temp1 = gen_reg_rtx (SImode);
3130 temp2 = gen_reg_rtx (SImode);
3131 emit_insn (gen_tldo_hix22 (temp1, addr));
3132 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3133 if (TARGET_ARCH32)
3134 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3135 else
3136 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3137 break;
3138
3139 case TLS_MODEL_INITIAL_EXEC:
3140 temp1 = gen_reg_rtx (SImode);
3141 temp2 = gen_reg_rtx (SImode);
3142 temp3 = gen_reg_rtx (Pmode);
3143 got = sparc_tls_got ();
3144 emit_insn (gen_tie_hi22 (temp1, addr));
3145 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3146 if (TARGET_ARCH32)
3147 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3148 else
3149 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3150 if (TARGET_SUN_TLS)
3151 {
3152 ret = gen_reg_rtx (Pmode);
3153 if (TARGET_ARCH32)
3154 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3155 temp3, addr));
3156 else
3157 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3158 temp3, addr));
3159 }
3160 else
3161 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3162 break;
3163
3164 case TLS_MODEL_LOCAL_EXEC:
3165 temp1 = gen_reg_rtx (Pmode);
3166 temp2 = gen_reg_rtx (Pmode);
3167 if (TARGET_ARCH32)
3168 {
3169 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3170 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3171 }
3172 else
3173 {
3174 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3175 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3176 }
3177 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3178 break;
3179
3180 default:
3181 gcc_unreachable ();
3182 }
3183
3184 else
3185 gcc_unreachable (); /* for now ... */
3186
3187 return ret;
3188 }
3189
3190
3191 /* Legitimize PIC addresses. If the address is already position-independent,
3192 we return ORIG. Newly generated position-independent addresses go into a
3193 reg. This is REG if nonzero, otherwise we allocate register(s) as
3194 necessary. */
3195
3196 rtx
3197 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
3198 rtx reg)
3199 {
3200 if (GET_CODE (orig) == SYMBOL_REF)
3201 {
3202 rtx pic_ref, address;
3203 rtx insn;
3204
3205 if (reg == 0)
3206 {
3207 gcc_assert (! reload_in_progress && ! reload_completed);
3208 reg = gen_reg_rtx (Pmode);
3209 }
3210
3211 if (flag_pic == 2)
3212 {
3213 /* If not during reload, allocate another temp reg here for loading
3214 in the address, so that these instructions can be optimized
3215 properly. */
3216 rtx temp_reg = ((reload_in_progress || reload_completed)
3217 ? reg : gen_reg_rtx (Pmode));
3218
3219 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3220 won't get confused into thinking that these two instructions
3221 are loading in the true address of the symbol. If in the
3222 future a PIC rtx exists, that should be used instead. */
3223 if (TARGET_ARCH64)
3224 {
3225 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3226 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3227 }
3228 else
3229 {
3230 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3231 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3232 }
3233 address = temp_reg;
3234 }
3235 else
3236 address = orig;
3237
3238 pic_ref = gen_const_mem (Pmode,
3239 gen_rtx_PLUS (Pmode,
3240 pic_offset_table_rtx, address));
3241 current_function_uses_pic_offset_table = 1;
3242 insn = emit_move_insn (reg, pic_ref);
3243 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3244 by loop. */
3245 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
3246 REG_NOTES (insn));
3247 return reg;
3248 }
3249 else if (GET_CODE (orig) == CONST)
3250 {
3251 rtx base, offset;
3252
3253 if (GET_CODE (XEXP (orig, 0)) == PLUS
3254 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3255 return orig;
3256
3257 if (reg == 0)
3258 {
3259 gcc_assert (! reload_in_progress && ! reload_completed);
3260 reg = gen_reg_rtx (Pmode);
3261 }
3262
3263 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3264 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3265 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3266 base == reg ? 0 : reg);
3267
3268 if (GET_CODE (offset) == CONST_INT)
3269 {
3270 if (SMALL_INT (offset))
3271 return plus_constant (base, INTVAL (offset));
3272 else if (! reload_in_progress && ! reload_completed)
3273 offset = force_reg (Pmode, offset);
3274 else
3275 /* If we reach here, then something is seriously wrong. */
3276 gcc_unreachable ();
3277 }
3278 return gen_rtx_PLUS (Pmode, base, offset);
3279 }
3280 else if (GET_CODE (orig) == LABEL_REF)
3281 /* ??? Why do we do this? */
3282 /* Now movsi_pic_label_ref uses it, but we ought to be checking that
3283 the register is live instead, in case it is eliminated. */
3284 current_function_uses_pic_offset_table = 1;
3285
3286 return orig;
3287 }
3288
3289 /* Try machine-dependent ways of modifying an illegitimate address X
3290 to be legitimate. If we find one, return the new, valid address.
3291
3292 OLDX is the address as it was before break_out_memory_refs was called.
3293 In some cases it is useful to look at this to decide what needs to be done.
3294
3295 MODE is the mode of the operand pointed to by X. */
3296
3297 rtx
3298 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
3299 {
3300 rtx orig_x = x;
3301
3302 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3303 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3304 force_operand (XEXP (x, 0), NULL_RTX));
3305 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3306 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3307 force_operand (XEXP (x, 1), NULL_RTX));
3308 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3309 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3310 XEXP (x, 1));
3311 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3312 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3313 force_operand (XEXP (x, 1), NULL_RTX));
3314
3315 if (x != orig_x && legitimate_address_p (mode, x, FALSE))
3316 return x;
3317
3318 if (SPARC_SYMBOL_REF_TLS_P (x))
3319 x = legitimize_tls_address (x);
3320 else if (flag_pic)
3321 x = legitimize_pic_address (x, mode, 0);
3322 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3323 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3324 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3325 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3326 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3327 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3328 else if (GET_CODE (x) == SYMBOL_REF
3329 || GET_CODE (x) == CONST
3330 || GET_CODE (x) == LABEL_REF)
3331 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3332 return x;
3333 }
3334
3335 /* Emit the special PIC helper function. */
3336
3337 static void
3338 emit_pic_helper (void)
3339 {
3340 const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
3341 int align;
3342
3343 switch_to_section (text_section);
3344
3345 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
3346 if (align > 0)
3347 ASM_OUTPUT_ALIGN (asm_out_file, align);
3348 ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
3349 if (flag_delayed_branch)
3350 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
3351 pic_name, pic_name);
3352 else
3353 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
3354 pic_name, pic_name);
3355
3356 pic_helper_emitted_p = true;
3357 }
3358
3359 /* Emit code to load the PIC register. */
3360
3361 static void
3362 load_pic_register (bool delay_pic_helper)
3363 {
3364 int orig_flag_pic = flag_pic;
3365
3366 /* If we haven't initialized the special PIC symbols, do so now. */
3367 if (!pic_helper_symbol_name[0])
3368 {
3369 ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
3370 pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
3371 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3372 }
3373
3374 /* If we haven't emitted the special PIC helper function, do so now unless
3375 we are requested to delay it. */
3376 if (!delay_pic_helper && !pic_helper_emitted_p)
3377 emit_pic_helper ();
3378
3379 flag_pic = 0;
3380 if (TARGET_ARCH64)
3381 emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
3382 pic_helper_symbol));
3383 else
3384 emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
3385 pic_helper_symbol));
3386 flag_pic = orig_flag_pic;
3387
3388 /* Need to emit this whether or not we obey regdecls,
3389 since setjmp/longjmp can cause life info to screw up.
3390 ??? In the case where we don't obey regdecls, this is not sufficient
3391 since we may not fall out the bottom. */
3392 emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
3393 }
3394 \f
3395 /* Return 1 if RTX is a MEM which is known to be aligned to at
3396 least a DESIRED byte boundary. */
3397
3398 int
3399 mem_min_alignment (rtx mem, int desired)
3400 {
3401 rtx addr, base, offset;
3402
3403 /* If it's not a MEM we can't accept it. */
3404 if (GET_CODE (mem) != MEM)
3405 return 0;
3406
3407 /* Obviously... */
3408 if (!TARGET_UNALIGNED_DOUBLES
3409 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3410 return 1;
3411
3412 /* ??? The rest of the function predates MEM_ALIGN so
3413 there is probably a bit of redundancy. */
3414 addr = XEXP (mem, 0);
3415 base = offset = NULL_RTX;
3416 if (GET_CODE (addr) == PLUS)
3417 {
3418 if (GET_CODE (XEXP (addr, 0)) == REG)
3419 {
3420 base = XEXP (addr, 0);
3421
3422 /* What we are saying here is that if the base
3423 REG is aligned properly, the compiler will make
3424 sure any REG based index upon it will be so
3425 as well. */
3426 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3427 offset = XEXP (addr, 1);
3428 else
3429 offset = const0_rtx;
3430 }
3431 }
3432 else if (GET_CODE (addr) == REG)
3433 {
3434 base = addr;
3435 offset = const0_rtx;
3436 }
3437
3438 if (base != NULL_RTX)
3439 {
3440 int regno = REGNO (base);
3441
3442 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3443 {
3444 /* Check if the compiler has recorded some information
3445 about the alignment of the base REG. If reload has
3446 completed, we already matched with proper alignments.
3447 If not running global_alloc, reload might give us
3448 an unaligned pointer to the local stack, though. */
3449 if (((cfun != 0
3450 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3451 || (optimize && reload_completed))
3452 && (INTVAL (offset) & (desired - 1)) == 0)
3453 return 1;
3454 }
3455 else
3456 {
3457 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3458 return 1;
3459 }
3460 }
3461 else if (! TARGET_UNALIGNED_DOUBLES
3462 || CONSTANT_P (addr)
3463 || GET_CODE (addr) == LO_SUM)
3464 {
3465 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3466 is true, in which case we can only assume that an access is aligned if
3467 it is to a constant address, or the address involves a LO_SUM. */
3468 return 1;
3469 }
3470
3471 /* An obviously unaligned address. */
3472 return 0;
3473 }
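/* A typical use (sketch): a move pattern can test
   mem_min_alignment (mem, 8) before emitting a doubleword ldd/std and
   fall back to two single-word accesses when the test fails.  */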
3474
3475 \f
3476 /* Vectors to keep interesting information about registers where it can easily
3477 be found. We used to use the actual mode value as the bit number, but there
3478 are more than 32 modes now. Instead we use two tables: one indexed by
3479 hard register number, and one indexed by mode. */
3480
3481 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3482 they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
3483 mapped into one sparc_mode_class mode. */
3484
3485 enum sparc_mode_class {
3486 S_MODE, D_MODE, T_MODE, O_MODE,
3487 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3488 CC_MODE, CCFP_MODE
3489 };
3490
3491 /* Modes for single-word and smaller quantities. */
3492 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3493
3494 /* Modes for double-word and smaller quantities. */
3495 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
3496
3497 /* Modes for quad-word and smaller quantities. */
3498 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3499
3500 /* Modes for 8-word and smaller quantities. */
3501 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3502
3503 /* Modes for single-float quantities. We must allow any single word or
3504 smaller quantity. This is because the fix/float conversion instructions
3505 take integer inputs/outputs from the float registers. */
3506 #define SF_MODES (S_MODES)
3507
3508 /* Modes for double-float and smaller quantities. */
3509 #define DF_MODES (S_MODES | D_MODES)
3510
3511 /* Modes for double-float only quantities. */
3512 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3513
3514 /* Modes for quad-float only quantities. */
3515 #define TF_ONLY_MODES (1 << (int) TF_MODE)
3516
3517 /* Modes for quad-float and smaller quantities. */
3518 #define TF_MODES (DF_MODES | TF_ONLY_MODES)
3519
3520 /* Modes for quad-float and double-float quantities. */
3521 #define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)
3522
3523 /* Modes for quad-float pair only quantities. */
3524 #define OF_ONLY_MODES (1 << (int) OF_MODE)
3525
3526 /* Modes for quad-float pairs and smaller quantities. */
3527 #define OF_MODES (TF_MODES | OF_ONLY_MODES)
3528
3529 #define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)
3530
3531 /* Modes for condition codes. */
3532 #define CC_MODES (1 << (int) CC_MODE)
3533 #define CCFP_MODES (1 << (int) CCFP_MODE)
3534
3535 /* Value is 1 if register/mode pair is acceptable on sparc.
3536 The funny mixture of D and T modes is because integer operations
3537 do not specially operate on tetra quantities, so non-quad-aligned
3538 registers can hold quadword quantities (except %o4 and %i4 because
3539 they cross fixed registers). */
3540
3541 /* This points to either the 32 bit or the 64 bit version. */
3542 const int *hard_regno_mode_classes;
3543
3544 static const int hard_32bit_mode_classes[] = {
3545 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3546 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3547 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3548 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3549
3550 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3551 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3552 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3553 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3554
3555 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3556 and none can hold SFmode/SImode values. */
3557 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3558 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3559 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3560 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3561
3562 /* %fcc[0123] */
3563 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3564
3565 /* %icc */
3566 CC_MODES
3567 };
3568
3569 static const int hard_64bit_mode_classes[] = {
3570 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3571 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3572 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3573 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3574
3575 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3576 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3577 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3578 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3579
3580 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3581 and none can hold SFmode/SImode values. */
3582 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3583 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3584 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3585 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3586
3587 /* %fcc[0123] */
3588 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3589
3590 /* %icc */
3591 CC_MODES
3592 };
3593
3594 int sparc_mode_class [NUM_MACHINE_MODES];
3595
3596 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3597
3598 static void
3599 sparc_init_modes (void)
3600 {
3601 int i;
3602
3603 for (i = 0; i < NUM_MACHINE_MODES; i++)
3604 {
3605 switch (GET_MODE_CLASS (i))
3606 {
3607 case MODE_INT:
3608 case MODE_PARTIAL_INT:
3609 case MODE_COMPLEX_INT:
3610 if (GET_MODE_SIZE (i) <= 4)
3611 sparc_mode_class[i] = 1 << (int) S_MODE;
3612 else if (GET_MODE_SIZE (i) == 8)
3613 sparc_mode_class[i] = 1 << (int) D_MODE;
3614 else if (GET_MODE_SIZE (i) == 16)
3615 sparc_mode_class[i] = 1 << (int) T_MODE;
3616 else if (GET_MODE_SIZE (i) == 32)
3617 sparc_mode_class[i] = 1 << (int) O_MODE;
3618 else
3619 sparc_mode_class[i] = 0;
3620 break;
3621 case MODE_VECTOR_INT:
3622 if (GET_MODE_SIZE (i) <= 4)
3623 sparc_mode_class[i] = 1 << (int)SF_MODE;
3624 else if (GET_MODE_SIZE (i) == 8)
3625 sparc_mode_class[i] = 1 << (int)DF_MODE;
3626 break;
3627 case MODE_FLOAT:
3628 case MODE_COMPLEX_FLOAT:
3629 if (GET_MODE_SIZE (i) <= 4)
3630 sparc_mode_class[i] = 1 << (int) SF_MODE;
3631 else if (GET_MODE_SIZE (i) == 8)
3632 sparc_mode_class[i] = 1 << (int) DF_MODE;
3633 else if (GET_MODE_SIZE (i) == 16)
3634 sparc_mode_class[i] = 1 << (int) TF_MODE;
3635 else if (GET_MODE_SIZE (i) == 32)
3636 sparc_mode_class[i] = 1 << (int) OF_MODE;
3637 else
3638 sparc_mode_class[i] = 0;
3639 break;
3640 case MODE_CC:
3641 if (i == (int) CCFPmode || i == (int) CCFPEmode)
3642 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
3643 else
3644 sparc_mode_class[i] = 1 << (int) CC_MODE;
3645 break;
3646 default:
3647 sparc_mode_class[i] = 0;
3648 break;
3649 }
3650 }
3651
3652 if (TARGET_ARCH64)
3653 hard_regno_mode_classes = hard_64bit_mode_classes;
3654 else
3655 hard_regno_mode_classes = hard_32bit_mode_classes;
3656
3657 /* Initialize the array used by REGNO_REG_CLASS. */
3658 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3659 {
3660 if (i < 16 && TARGET_V8PLUS)
3661 sparc_regno_reg_class[i] = I64_REGS;
3662 else if (i < 32 || i == FRAME_POINTER_REGNUM)
3663 sparc_regno_reg_class[i] = GENERAL_REGS;
3664 else if (i < 64)
3665 sparc_regno_reg_class[i] = FP_REGS;
3666 else if (i < 96)
3667 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
3668 else if (i < 100)
3669 sparc_regno_reg_class[i] = FPCC_REGS;
3670 else
3671 sparc_regno_reg_class[i] = NO_REGS;
3672 }
3673 }
3674 \f
3675 /* Compute the frame size required by the function. This function is called
3676 during the reload pass and also by sparc_expand_prologue. */
3677
3678 HOST_WIDE_INT
3679 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
3680 {
3681 int outgoing_args_size = (current_function_outgoing_args_size
3682 + REG_PARM_STACK_SPACE (current_function_decl));
3683 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
3684 int i;
3685
3686 if (TARGET_ARCH64)
3687 {
3688 for (i = 0; i < 8; i++)
3689 if (regs_ever_live[i] && ! call_used_regs[i])
3690 n_regs += 2;
3691 }
3692 else
3693 {
3694 for (i = 0; i < 8; i += 2)
3695 if ((regs_ever_live[i] && ! call_used_regs[i])
3696 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3697 n_regs += 2;
3698 }
3699
3700 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
3701 if ((regs_ever_live[i] && ! call_used_regs[i])
3702 || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
3703 n_regs += 2;
3704
3705 /* Set up values for use in prologue and epilogue. */
3706 num_gfregs = n_regs;
3707
3708 if (leaf_function_p
3709 && n_regs == 0
3710 && size == 0
3711 && current_function_outgoing_args_size == 0)
3712 actual_fsize = apparent_fsize = 0;
3713 else
3714 {
3715 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
3716 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
3717 apparent_fsize += n_regs * 4;
3718 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
3719 }
3720
3721 /* Make sure nothing can clobber our register windows.
3722 If a SAVE must be done, or there is a stack-local variable,
3723 the register window area must be allocated. */
3724 if (! leaf_function_p || size > 0)
3725 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
3726
3727 return SPARC_STACK_ALIGN (actual_fsize);
3728 }
3729
3730 /* Output any necessary .register pseudo-ops. */
3731
3732 void
3733 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
3734 {
3735 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
3736 int i;
3737
3738 if (TARGET_ARCH32)
3739 return;
3740
3741 /* Check whether %g[2367] were used without
3742 a .register directive being printed for them already. */
3743 for (i = 2; i < 8; i++)
3744 {
3745 if (regs_ever_live [i]
3746 && ! sparc_hard_reg_printed [i])
3747 {
3748 sparc_hard_reg_printed [i] = 1;
3749 /* %g7 is used as TLS base register, use #ignore
3750 for it instead of #scratch. */
3751 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
3752 i == 7 ? "ignore" : "scratch");
3753 }
3754 if (i == 3) i = 5;
3755 }
3756 #endif
3757 }
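
/* For illustration only (not compiled): in a 64-bit function that
   clobbers %g2 and %g3 and uses the TLS register, the loop above
   would emit something like

	.register	%g2, #scratch
	.register	%g3, #scratch
	.register	%g7, #ignore

   telling the assembler that these global registers are used
   deliberately.  */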
3758
3759 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
3760 as needed. LOW should be double-word aligned for 32-bit registers.
3761 Return the new OFFSET. */
3762
3763 #define SORR_SAVE 0
3764 #define SORR_RESTORE 1
3765
3766 static int
3767 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
3768 {
3769 rtx mem, insn;
3770 int i;
3771
3772 if (TARGET_ARCH64 && high <= 32)
3773 {
3774 for (i = low; i < high; i++)
3775 {
3776 if (regs_ever_live[i] && ! call_used_regs[i])
3777 {
3778 mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
3779 set_mem_alias_set (mem, sparc_sr_alias_set);
3780 if (action == SORR_SAVE)
3781 {
3782 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
3783 RTX_FRAME_RELATED_P (insn) = 1;
3784 }
3785 else /* action == SORR_RESTORE */
3786 emit_move_insn (gen_rtx_REG (DImode, i), mem);
3787 offset += 8;
3788 }
3789 }
3790 }
3791 else
3792 {
3793 for (i = low; i < high; i += 2)
3794 {
3795 bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
3796 bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
3797 enum machine_mode mode;
3798 int regno;
3799
3800 if (reg0 && reg1)
3801 {
3802 mode = i < 32 ? DImode : DFmode;
3803 regno = i;
3804 }
3805 else if (reg0)
3806 {
3807 mode = i < 32 ? SImode : SFmode;
3808 regno = i;
3809 }
3810 else if (reg1)
3811 {
3812 mode = i < 32 ? SImode : SFmode;
3813 regno = i + 1;
3814 offset += 4;
3815 }
3816 else
3817 continue;
3818
3819 mem = gen_rtx_MEM (mode, plus_constant (base, offset));
3820 set_mem_alias_set (mem, sparc_sr_alias_set);
3821 if (action == SORR_SAVE)
3822 {
3823 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
3824 RTX_FRAME_RELATED_P (insn) = 1;
3825 }
3826 else /* action == SORR_RESTORE */
3827 emit_move_insn (gen_rtx_REG (mode, regno), mem);
3828
3829 /* Always preserve double-word alignment. */
3830 offset = (offset + 7) & -8;
3831 }
3832 }
3833
3834 return offset;
3835 }
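
/* Illustrative walk-through of the pairing logic above: if both
   registers of an even/odd pair are live and call-saved, one
   double-word move (DImode for %g registers, DFmode for fp registers)
   handles the pair; if only the odd register is, a single-word move
   is done at OFFSET+4 instead.  Either way, the final rounding keeps
   OFFSET double-word aligned for the next pair.  */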
3836
3837 /* Emit code to save call-saved registers. */
3838
3839 static void
3840 emit_save_or_restore_regs (int action)
3841 {
3842 HOST_WIDE_INT offset;
3843 rtx base;
3844
3845 offset = frame_base_offset - apparent_fsize;
3846
3847 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
3848 {
3849 /* ??? This might be optimized a little as %g1 might already have a
3850 value close enough that a single add insn will do. */
3851 /* ??? All of this is probably only a temporary fix, because
3852 if %g1 can hold a function result, then
3853 sparc_expand_epilogue will lose (the result will be
3854 clobbered). */
3855 base = gen_rtx_REG (Pmode, 1);
3856 emit_move_insn (base, GEN_INT (offset));
3857 emit_insn (gen_rtx_SET (VOIDmode,
3858 base,
3859 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
3860 offset = 0;
3861 }
3862 else
3863 base = frame_base_reg;
3864
3865 offset = save_or_restore_regs (0, 8, base, offset, action);
3866 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
3867 }
3868
3869 /* Generate a save_register_window insn. */
3870
3871 static rtx
3872 gen_save_register_window (rtx increment)
3873 {
3874 if (TARGET_ARCH64)
3875 return gen_save_register_windowdi (increment);
3876 else
3877 return gen_save_register_windowsi (increment);
3878 }
3879
3880 /* Generate an increment for the stack pointer. */
3881
3882 static rtx
3883 gen_stack_pointer_inc (rtx increment)
3884 {
3885 return gen_rtx_SET (VOIDmode,
3886 stack_pointer_rtx,
3887 gen_rtx_PLUS (Pmode,
3888 stack_pointer_rtx,
3889 increment));
3890 }
3891
3892 /* Generate a decrement for the stack pointer. */
3893
3894 static rtx
3895 gen_stack_pointer_dec (rtx decrement)
3896 {
3897 return gen_rtx_SET (VOIDmode,
3898 stack_pointer_rtx,
3899 gen_rtx_MINUS (Pmode,
3900 stack_pointer_rtx,
3901 decrement));
3902 }
3903
3904 /* Expand the function prologue. The prologue is responsible for reserving
3905 storage for the frame, saving the call-saved registers and loading the
3906 PIC register if needed. */
3907
3908 void
3909 sparc_expand_prologue (void)
3910 {
3911 rtx insn;
3912 int i;
3913
3914 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
3915 on the final value of the flag means deferring the prologue/epilogue
3916 expansion until just before the second scheduling pass, which is too
3917 late to emit multiple epilogues or return insns.
3918
3919 Of course we are making the assumption that the value of the flag
3920 will not change between now and its final value. Of the three parts
3921 of the formula, only the last one can reasonably vary. Let's take a
3922 closer look, after assuming that the first two are true
3923 (otherwise the last value is effectively silenced).
3924
3925 If only_leaf_regs_used returns false, the global predicate will also
3926 be false so the actual frame size calculated below will be positive.
3927 As a consequence, the save_register_window insn will be emitted in
3928 the instruction stream; now this insn explicitly references %fp
3929 which is not a leaf register so only_leaf_regs_used will always
3930 return false subsequently.
3931
3932 If only_leaf_regs_used returns true, we hope that the subsequent
3933 optimization passes won't cause non-leaf registers to pop up. For
3934 example, the regrename pass has special provisions to not rename to
3935 non-leaf registers in a leaf function. */
3936 sparc_leaf_function_p
3937 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
3938
3939 /* Need to use actual_fsize, since we are also allocating
3940 space for our callee (and our own register save area). */
3941 actual_fsize
3942 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
3943
3944 /* Advertise that the data calculated just above are now valid. */
3945 sparc_prologue_data_valid_p = true;
3946
3947 if (sparc_leaf_function_p)
3948 {
3949 frame_base_reg = stack_pointer_rtx;
3950 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
3951 }
3952 else
3953 {
3954 frame_base_reg = hard_frame_pointer_rtx;
3955 frame_base_offset = SPARC_STACK_BIAS;
3956 }
3957
3958 if (actual_fsize == 0)
3959 /* do nothing. */ ;
3960 else if (sparc_leaf_function_p)
3961 {
3962 if (actual_fsize <= 4096)
3963 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
3964 else if (actual_fsize <= 8192)
3965 {
3966 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
3967 /* %sp is still the CFA register. */
3968 RTX_FRAME_RELATED_P (insn) = 1;
3969 insn
3970 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3971 }
3972 else
3973 {
3974 rtx reg = gen_rtx_REG (Pmode, 1);
3975 emit_move_insn (reg, GEN_INT (-actual_fsize));
3976 insn = emit_insn (gen_stack_pointer_inc (reg));
3977 REG_NOTES (insn) =
3978 gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3979 gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
3980 REG_NOTES (insn));
3981 }
3982
3983 RTX_FRAME_RELATED_P (insn) = 1;
3984 }
3985 else
3986 {
3987 if (actual_fsize <= 4096)
3988 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
3989 else if (actual_fsize <= 8192)
3990 {
3991 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
3992 /* %sp is not the CFA register anymore. */
3993 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
3994 }
3995 else
3996 {
3997 rtx reg = gen_rtx_REG (Pmode, 1);
3998 emit_move_insn (reg, GEN_INT (-actual_fsize));
3999 insn = emit_insn (gen_save_register_window (reg));
4000 }
4001
4002 RTX_FRAME_RELATED_P (insn) = 1;
4003 for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
4004 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4005 }
4006
4007 if (num_gfregs)
4008 emit_save_or_restore_regs (SORR_SAVE);
4009
4010 /* Load the PIC register if needed. */
4011 if (flag_pic && current_function_uses_pic_offset_table)
4012 load_pic_register (false);
4013 }
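
/* For illustration (typical output, not a verbatim trace): a non-leaf
   function with a 96-byte frame allocates a fresh register window,

	save	%sp, -96, %sp

   whereas a leaf function of the same size just drops the stack
   pointer within its caller's window:

	add	%sp, -96, %sp

   Frames between 4097 and 8192 bytes are carved out in two steps so
   that each constant fits in a 13-bit immediate; anything larger
   stages the constant through %g1 first, as coded above.  */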
4014
4015 /* This function generates the assembly code for function entry, which boils
4016 down to emitting the necessary .register directives. */
4017
4018 static void
4019 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4020 {
4021 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4022 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4023
4024 sparc_output_scratch_registers (file);
4025 }
4026
4027 /* Expand the function epilogue, either normal or part of a sibcall.
4028 We emit all the instructions except the return or the call. */
4029
4030 void
4031 sparc_expand_epilogue (void)
4032 {
4033 if (num_gfregs)
4034 emit_save_or_restore_regs (SORR_RESTORE);
4035
4036 if (actual_fsize == 0)
4037 /* do nothing. */ ;
4038 else if (sparc_leaf_function_p)
4039 {
4040 if (actual_fsize <= 4096)
4041 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4042 else if (actual_fsize <= 8192)
4043 {
4044 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4045 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4046 }
4047 else
4048 {
4049 rtx reg = gen_rtx_REG (Pmode, 1);
4050 emit_move_insn (reg, GEN_INT (-actual_fsize));
4051 emit_insn (gen_stack_pointer_dec (reg));
4052 }
4053 }
4054 }
4055
4056 /* Return true if it is appropriate to emit `return' instructions in the
4057 body of a function. */
4058
4059 bool
4060 sparc_can_use_return_insn_p (void)
4061 {
4062 return sparc_prologue_data_valid_p
4063 && (actual_fsize == 0 || !sparc_leaf_function_p);
4064 }
4065
4066 /* This function generates the assembly code for function exit. */
4067
4068 static void
4069 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4070 {
4071 /* If code does not drop into the epilogue, we still have to output
4072 a dummy nop for the sake of sane backtraces. Otherwise, if the
4073 last two instructions of a function were "call foo; dslot;" this
4074 can make the return PC of foo (i.e. address of call instruction
4075 plus 8) point to the first instruction in the next function. */
4076
4077 rtx insn, last_real_insn;
4078
4079 insn = get_last_insn ();
4080
4081 last_real_insn = prev_real_insn (insn);
4082 if (last_real_insn
4083 && GET_CODE (last_real_insn) == INSN
4084 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4085 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4086
4087 if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
4088 fputs("\tnop\n", file);
4089
4090 sparc_output_deferred_case_vectors ();
4091 }
4092
4093 /* Output a 'restore' instruction. */
4094
4095 static void
4096 output_restore (rtx pat)
4097 {
4098 rtx operands[3];
4099
4100 if (! pat)
4101 {
4102 fputs ("\t restore\n", asm_out_file);
4103 return;
4104 }
4105
4106 gcc_assert (GET_CODE (pat) == SET);
4107
4108 operands[0] = SET_DEST (pat);
4109 pat = SET_SRC (pat);
4110
4111 switch (GET_CODE (pat))
4112 {
4113 case PLUS:
4114 operands[1] = XEXP (pat, 0);
4115 operands[2] = XEXP (pat, 1);
4116 output_asm_insn (" restore %r1, %2, %Y0", operands);
4117 break;
4118 case LO_SUM:
4119 operands[1] = XEXP (pat, 0);
4120 operands[2] = XEXP (pat, 1);
4121 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4122 break;
4123 case ASHIFT:
4124 operands[1] = XEXP (pat, 0);
4125 gcc_assert (XEXP (pat, 1) == const1_rtx);
4126 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4127 break;
4128 default:
4129 operands[1] = pat;
4130 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4131 break;
4132 }
4133 }
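
/* Two illustrative cases (assuming a delay-slot insn supplied by the
   caller of this function): a pattern (set %i0 (plus %i0 %i1)) is
   folded into "restore %i0, %i1, %o0" by the PLUS case -- the %Y
   output modifier rewrites the destination into the caller's window
   -- while a simple (set %i0 (const_int 0)) falls through to the
   default case as "restore %g0, 0, %o0".  */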
4134
4135 /* Output a return. */
4136
4137 const char *
4138 output_return (rtx insn)
4139 {
4140 if (sparc_leaf_function_p)
4141 {
4142 /* This is a leaf function so we don't have to bother restoring the
4143 register window, which frees us from dealing with the convoluted
4144 semantics of restore/return. We simply output the jump to the
4145 return address and the insn in the delay slot (if any). */
4146
4147 gcc_assert (! current_function_calls_eh_return);
4148
4149 return "jmp\t%%o7+%)%#";
4150 }
4151 else
4152 {
4153 /* This is a regular function so we have to restore the register window.
4154 We may have a pending insn for the delay slot, which will be either
4155 combined with the 'restore' instruction or put in the delay slot of
4156 the 'return' instruction. */
4157
4158 if (current_function_calls_eh_return)
4159 {
4160 /* If the function uses __builtin_eh_return, the eh_return
4161 machinery occupies the delay slot. */
4162 gcc_assert (! final_sequence);
4163
4164 if (! flag_delayed_branch)
4165 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4166
4167 if (TARGET_V9)
4168 fputs ("\treturn\t%i7+8\n", asm_out_file);
4169 else
4170 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4171
4172 if (flag_delayed_branch)
4173 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4174 else
4175 fputs ("\t nop\n", asm_out_file);
4176 }
4177 else if (final_sequence)
4178 {
4179 rtx delay, pat;
4180
4181 delay = NEXT_INSN (insn);
4182 gcc_assert (delay);
4183
4184 pat = PATTERN (delay);
4185
4186 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4187 {
4188 epilogue_renumber (&pat, 0);
4189 return "return\t%%i7+%)%#";
4190 }
4191 else
4192 {
4193 output_asm_insn ("jmp\t%%i7+%)", NULL);
4194 output_restore (pat);
4195 PATTERN (delay) = gen_blockage ();
4196 INSN_CODE (delay) = -1;
4197 }
4198 }
4199 else
4200 {
4201 /* The delay slot is empty. */
4202 if (TARGET_V9)
4203 return "return\t%%i7+%)\n\t nop";
4204 else if (flag_delayed_branch)
4205 return "jmp\t%%i7+%)\n\t restore";
4206 else
4207 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4208 }
4209 }
4210
4211 return "";
4212 }
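
/* Concretely, the templates above produce sequences such as
   "jmp %o7+8; nop" for a leaf function, "return %i7+8; nop" on V9,
   or "jmp %i7+8; restore" elsewhere; "%)" normally expands to 8, the
   usual return-address offset (or 12 in the 32-bit ABI when the call
   is followed by an unimp for an aggregate return).  */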
4213
4214 /* Output a sibling call. */
4215
4216 const char *
4217 output_sibcall (rtx insn, rtx call_operand)
4218 {
4219 rtx operands[1];
4220
4221 gcc_assert (flag_delayed_branch);
4222
4223 operands[0] = call_operand;
4224
4225 if (sparc_leaf_function_p)
4226 {
4227 /* This is a leaf function so we don't have to bother restoring the
4228 register window. We simply output the jump to the function and
4229 the insn in the delay slot (if any). */
4230
4231 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4232
4233 if (final_sequence)
4234 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4235 operands);
4236 else
4237 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4238 it into a branch if possible. */
4239 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4240 operands);
4241 }
4242 else
4243 {
4244 /* This is a regular function so we have to restore the register window.
4245 We may have a pending insn for the delay slot, which will be combined
4246 with the 'restore' instruction. */
4247
4248 output_asm_insn ("call\t%a0, 0", operands);
4249
4250 if (final_sequence)
4251 {
4252 rtx delay = NEXT_INSN (insn);
4253 gcc_assert (delay);
4254
4255 output_restore (PATTERN (delay));
4256
4257 PATTERN (delay) = gen_blockage ();
4258 INSN_CODE (delay) = -1;
4259 }
4260 else
4261 output_restore (NULL_RTX);
4262 }
4263
4264 return "";
4265 }
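
/* For a leaf sibcall with a filled delay slot this emits, e.g.,

	sethi	%hi(target), %g1
	jmp	%g1 + %lo(target)
	 <delay-slot insn>

   while the non-leaf form is a plain "call target, 0" whose delay
   slot carries the (possibly combined) restore.  */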
4266 \f
4267 /* Functions for handling argument passing.
4268
4269 For 32-bit, the first 6 args are normally in registers and the rest are
4270 pushed. Any arg that starts within the first 6 words is at least
4271 partially passed in a register unless its data type forbids it.
4272
4273 For 64-bit, the argument registers are laid out as an array of 16 elements
4274 and arguments are added sequentially. The first 6 int args and up to the
4275 first 16 fp args (depending on size) are passed in regs.
4276
4277 Slot Stack Integral Float Float in structure Double Long Double
4278 ---- ----- -------- ----- ------------------ ------ -----------
4279 15 [SP+248] %f31 %f30,%f31 %d30
4280 14 [SP+240] %f29 %f28,%f29 %d28 %q28
4281 13 [SP+232] %f27 %f26,%f27 %d26
4282 12 [SP+224] %f25 %f24,%f25 %d24 %q24
4283 11 [SP+216] %f23 %f22,%f23 %d22
4284 10 [SP+208] %f21 %f20,%f21 %d20 %q20
4285 9 [SP+200] %f19 %f18,%f19 %d18
4286 8 [SP+192] %f17 %f16,%f17 %d16 %q16
4287 7 [SP+184] %f15 %f14,%f15 %d14
4288 6 [SP+176] %f13 %f12,%f13 %d12 %q12
4289 5 [SP+168] %o5 %f11 %f10,%f11 %d10
4290 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
4291 3 [SP+152] %o3 %f7 %f6,%f7 %d6
4292 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
4293 1 [SP+136] %o1 %f3 %f2,%f3 %d2
4294 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
4295
4296 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4297
4298 Integral arguments are always passed as 64-bit quantities appropriately
4299 extended.
4300
4301 Passing of floating point values is handled as follows.
4302 If a prototype is in scope:
4303 If the value is in a named argument (i.e. not a stdarg function or a
4304 value not part of the `...') then the value is passed in the appropriate
4305 fp reg.
4306 If the value is part of the `...' and is passed in one of the first 6
4307 slots then the value is passed in the appropriate int reg.
4308 If the value is part of the `...' and is not passed in one of the first 6
4309 slots then the value is passed in memory.
4310 If a prototype is not in scope:
4311 If the value is one of the first 6 arguments the value is passed in the
4312 appropriate integer reg and the appropriate fp reg.
4313 If the value is not one of the first 6 arguments the value is passed in
4314 the appropriate fp reg and in memory.
4315
4316
4317 Summary of the calling conventions implemented by GCC on SPARC:
4318
4319 32-bit ABI:
4320 size argument return value
4321
4322 small integer <4 int. reg. int. reg.
4323 word 4 int. reg. int. reg.
4324 double word 8 int. reg. int. reg.
4325
4326 _Complex small integer <8 int. reg. int. reg.
4327 _Complex word 8 int. reg. int. reg.
4328 _Complex double word 16 memory int. reg.
4329
4330 vector integer <=8 int. reg. FP reg.
4331 vector integer >8 memory memory
4332
4333 float 4 int. reg. FP reg.
4334 double 8 int. reg. FP reg.
4335 long double 16 memory memory
4336
4337 _Complex float 8 memory FP reg.
4338 _Complex double 16 memory FP reg.
4339 _Complex long double 32 memory FP reg.
4340
4341 vector float any memory memory
4342
4343 aggregate any memory memory
4344
4345
4346
4347 64-bit ABI:
4348 size argument return value
4349
4350 small integer <8 int. reg. int. reg.
4351 word 8 int. reg. int. reg.
4352 double word 16 int. reg. int. reg.
4353
4354 _Complex small integer <16 int. reg. int. reg.
4355 _Complex word 16 int. reg. int. reg.
4356 _Complex double word 32 memory int. reg.
4357
4358 vector integer <=16 FP reg. FP reg.
4359 vector integer 16<s<=32 memory FP reg.
4360 vector integer >32 memory memory
4361
4362 float 4 FP reg. FP reg.
4363 double 8 FP reg. FP reg.
4364 long double 16 FP reg. FP reg.
4365
4366 _Complex float 8 FP reg. FP reg.
4367 _Complex double 16 FP reg. FP reg.
4368 _Complex long double 32 memory FP reg.
4369
4370 vector float <=16 FP reg. FP reg.
4371 vector float 16<s<=32 memory FP reg.
4372 vector float >32 memory memory
4373
4374 aggregate <=16 reg. reg.
4375 aggregate 16<s<=32 memory reg.
4376 aggregate >32 memory memory
4377
4378
4379
4380 Note #1: complex floating-point types follow the extended SPARC ABIs as
4381 implemented by the Sun compiler.
4382
4383 Note #2: integral vector types follow the scalar floating-point types
4384 conventions to match what is implemented by the Sun VIS SDK.
4385
4386 Note #3: floating-point vector types follow the aggregate types
4387 conventions. */
4388
4389
4390 /* Maximum number of int regs for args. */
4391 #define SPARC_INT_ARG_MAX 6
4392 /* Maximum number of fp regs for args. */
4393 #define SPARC_FP_ARG_MAX 16
4394
4395 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
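
/* E.g. with 8-byte words (64-bit), ROUND_ADVANCE (12) == 2: a 12-byte
   object consumes two argument slots.  */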
4396
4397 /* Handle the INIT_CUMULATIVE_ARGS macro.
4398 Initialize a variable CUM of type CUMULATIVE_ARGS
4399 for a call to a function whose data type is FNTYPE.
4400 For a library call, FNTYPE is 0. */
4401
4402 void
4403 init_cumulative_args (struct sparc_args *cum, tree fntype,
4404 rtx libname ATTRIBUTE_UNUSED,
4405 tree fndecl ATTRIBUTE_UNUSED)
4406 {
4407 cum->words = 0;
4408 cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
4409 cum->libcall_p = fntype == 0;
4410 }
4411
4412 /* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
4413 When a prototype says `char' or `short', really pass an `int'. */
4414
4415 static bool
4416 sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
4417 {
4418 return TARGET_ARCH32 ? true : false;
4419 }
4420
4421 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4422
4423 static bool
4424 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4425 {
4426 return TARGET_ARCH64 ? true : false;
4427 }
4428
4429 /* Scan the record type TYPE and return the following predicates:
4430 - INTREGS_P: the record contains at least one field or sub-field
4431 that is eligible for promotion in integer registers.
4432 - FPREGS_P: the record contains at least one field or sub-field
4433 that is eligible for promotion in floating-point registers.
4434 - PACKED_P: the record contains at least one field that is packed.
4435
4436 Sub-fields are not taken into account for the PACKED_P predicate. */
4437
4438 static void
4439 scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4440 {
4441 tree field;
4442
4443 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4444 {
4445 if (TREE_CODE (field) == FIELD_DECL)
4446 {
4447 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4448 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4449 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4450 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4451 && TARGET_FPU)
4452 *fpregs_p = 1;
4453 else
4454 *intregs_p = 1;
4455
4456 if (packed_p && DECL_PACKED (field))
4457 *packed_p = 1;
4458 }
4459 }
4460 }
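
/* Illustrative example: scanning

	struct s { int i; float f; };

   sets both *INTREGS_P (the int field) and, with the FPU enabled,
   *FPREGS_P (the float field); a record with a field declared
   __attribute__((packed)) additionally sets *PACKED_P.  */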
4461
4462 /* Compute the slot number to pass an argument in.
4463 Return the slot number or -1 if passing on the stack.
4464
4465 CUM is a variable of type CUMULATIVE_ARGS which gives info about
4466 the preceding args and about the function being called.
4467 MODE is the argument's machine mode.
4468 TYPE is the data type of the argument (as a tree).
4469 This is null for libcalls where that information may
4470 not be available.
4471 NAMED is nonzero if this argument is a named parameter
4472 (otherwise it is an extra parameter matching an ellipsis).
4473 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4474 *PREGNO records the register number to use if scalar type.
4475 *PPADDING records the amount of padding needed in words. */
4476
4477 static int
4478 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4479 tree type, int named, int incoming_p,
4480 int *pregno, int *ppadding)
4481 {
4482 int regbase = (incoming_p
4483 ? SPARC_INCOMING_INT_ARG_FIRST
4484 : SPARC_OUTGOING_INT_ARG_FIRST);
4485 int slotno = cum->words;
4486 enum mode_class mclass;
4487 int regno;
4488
4489 *ppadding = 0;
4490
4491 if (type && TREE_ADDRESSABLE (type))
4492 return -1;
4493
4494 if (TARGET_ARCH32
4495 && mode == BLKmode
4496 && type
4497 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4498 return -1;
4499
4500 /* For SPARC64, objects requiring 16-byte alignment get it. */
4501 if (TARGET_ARCH64
4502 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4503 && (slotno & 1) != 0)
4504 slotno++, *ppadding = 1;
4505
4506 mclass = GET_MODE_CLASS (mode);
4507 if (type && TREE_CODE (type) == VECTOR_TYPE)
4508 {
4509 /* Vector types deserve special treatment because they are
4510 polymorphic wrt their mode, depending upon whether VIS
4511 instructions are enabled. */
4512 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4513 {
4514 /* The SPARC port defines no floating-point vector modes. */
4515 gcc_assert (mode == BLKmode);
4516 }
4517 else
4518 {
4519 /* Integral vector types should either have a vector
4520 mode or an integral mode, because we are guaranteed
4521 by pass_by_reference that their size is not greater
4522 than 16 bytes and TImode is 16-byte wide. */
4523 gcc_assert (mode != BLKmode);
4524
4525 /* Vector integers are handled like floats according to
4526 the Sun VIS SDK. */
4527 mclass = MODE_FLOAT;
4528 }
4529 }
4530
4531 switch (mclass)
4532 {
4533 case MODE_FLOAT:
4534 case MODE_COMPLEX_FLOAT:
4535 if (TARGET_ARCH64 && TARGET_FPU && named)
4536 {
4537 if (slotno >= SPARC_FP_ARG_MAX)
4538 return -1;
4539 regno = SPARC_FP_ARG_FIRST + slotno * 2;
4540 /* Arguments filling only one single FP register are
4541 right-justified in the outer double FP register. */
4542 if (GET_MODE_SIZE (mode) <= 4)
4543 regno++;
4544 break;
4545 }
4546 /* fallthrough */
4547
4548 case MODE_INT:
4549 case MODE_COMPLEX_INT:
4550 if (slotno >= SPARC_INT_ARG_MAX)
4551 return -1;
4552 regno = regbase + slotno;
4553 break;
4554
4555 case MODE_RANDOM:
4556 if (mode == VOIDmode)
4557 /* MODE is VOIDmode when generating the actual call. */
4558 return -1;
4559
4560 gcc_assert (mode == BLKmode);
4561
4562 if (TARGET_ARCH32 || !type || (TREE_CODE (type) == UNION_TYPE))
4563 {
4564 if (slotno >= SPARC_INT_ARG_MAX)
4565 return -1;
4566 regno = regbase + slotno;
4567 }
4568 else /* TARGET_ARCH64 && type */
4569 {
4570 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4571
4572 /* First see what kinds of registers we would need. */
4573 if (TREE_CODE (type) == VECTOR_TYPE)
4574 fpregs_p = 1;
4575 else
4576 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4577
4578 /* The ABI obviously doesn't specify how packed structures
4579 are passed. These are defined to be passed in int regs
4580 if possible, otherwise memory. */
4581 if (packed_p || !named)
4582 fpregs_p = 0, intregs_p = 1;
4583
4584 /* If all arg slots are filled, then must pass on stack. */
4585 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4586 return -1;
4587
4588 /* If there are only int args and all int arg slots are filled,
4589 then must pass on stack. */
4590 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4591 return -1;
4592
4593 /* Note that even if all int arg slots are filled, fp members may
4594 still be passed in regs if such regs are available.
4595 *PREGNO isn't set because there may be more than one; it's up
4596 to the caller to compute them. */
4597 return slotno;
4598 }
4599 break;
4600
4601 default :
4602 gcc_unreachable ();
4603 }
4604
4605 *pregno = regno;
4606 return slotno;
4607 }
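
/* Tying this back to the table further above (illustrative): a named
   float occupying slot 2 of a prototyped 64-bit call gets
   regno = SPARC_FP_ARG_FIRST + 2*2, then regno++ because a 4-byte
   value is right-justified in its 8-byte slot -- i.e. %f5, matching
   the "Float" column for slot 2.  */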
4608
4609 /* Handle recursive register counting for structure field layout. */
4610
4611 struct function_arg_record_value_parms
4612 {
4613 rtx ret; /* return expression being built. */
4614 int slotno; /* slot number of the argument. */
4615 int named; /* whether the argument is named. */
4616 int regbase; /* regno of the base register. */
4617 int stack; /* 1 if part of the argument is on the stack. */
4618 int intoffset; /* offset of the first pending integer field. */
4619 unsigned int nregs; /* number of words passed in registers. */
4620 };
4621
4622 static void function_arg_record_value_3
4623 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4624 static void function_arg_record_value_2
4625 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4626 static void function_arg_record_value_1
4627 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4628 static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4629 static rtx function_arg_union_value (int, enum machine_mode, int, int);
4630
4631 /* A subroutine of function_arg_record_value. Traverse the structure
4632 recursively and determine how many registers will be required. */
4633
4634 static void
4635 function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4636 struct function_arg_record_value_parms *parms,
4637 bool packed_p)
4638 {
4639 tree field;
4640
4641 /* We need to compute how many registers are needed so we can
4642 allocate the PARALLEL but before we can do that we need to know
4643 whether there are any packed fields. The ABI obviously doesn't
4644 specify how structures are passed in this case, so they are
4645 defined to be passed in int regs if possible, otherwise memory,
4646 regardless of whether there are fp values present. */
4647
4648 if (! packed_p)
4649 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4650 {
4651 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4652 {
4653 packed_p = true;
4654 break;
4655 }
4656 }
4657
4658 /* Compute how many registers we need. */
4659 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4660 {
4661 if (TREE_CODE (field) == FIELD_DECL)
4662 {
4663 HOST_WIDE_INT bitpos = startbitpos;
4664
4665 if (DECL_SIZE (field) != 0)
4666 {
4667 if (integer_zerop (DECL_SIZE (field)))
4668 continue;
4669
4670 if (host_integerp (bit_position (field), 1))
4671 bitpos += int_bit_position (field);
4672 }
4673
4674 /* ??? FIXME: else assume zero offset. */
4675
4676 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4677 function_arg_record_value_1 (TREE_TYPE (field),
4678 bitpos,
4679 parms,
4680 packed_p);
4681 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4682 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4683 && TARGET_FPU
4684 && parms->named
4685 && ! packed_p)
4686 {
4687 if (parms->intoffset != -1)
4688 {
4689 unsigned int startbit, endbit;
4690 int intslots, this_slotno;
4691
4692 startbit = parms->intoffset & -BITS_PER_WORD;
4693 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4694
4695 intslots = (endbit - startbit) / BITS_PER_WORD;
4696 this_slotno = parms->slotno + parms->intoffset
4697 / BITS_PER_WORD;
4698
4699 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4700 {
4701 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4702 /* We need to pass this field on the stack. */
4703 parms->stack = 1;
4704 }
4705
4706 parms->nregs += intslots;
4707 parms->intoffset = -1;
4708 }
4709
4710 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4711 If it wasn't true we wouldn't be here. */
4712 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4713 && DECL_MODE (field) == BLKmode)
4714 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4715 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4716 parms->nregs += 2;
4717 else
4718 parms->nregs += 1;
4719 }
4720 else
4721 {
4722 if (parms->intoffset == -1)
4723 parms->intoffset = bitpos;
4724 }
4725 }
4726 }
4727 }
4728
4729 /* A subroutine of function_arg_record_value. Assign the bits of the
4730 structure between parms->intoffset and bitpos to integer registers. */
4731
4732 static void
4733 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4734 struct function_arg_record_value_parms *parms)
4735 {
4736 enum machine_mode mode;
4737 unsigned int regno;
4738 unsigned int startbit, endbit;
4739 int this_slotno, intslots, intoffset;
4740 rtx reg;
4741
4742 if (parms->intoffset == -1)
4743 return;
4744
4745 intoffset = parms->intoffset;
4746 parms->intoffset = -1;
4747
4748 startbit = intoffset & -BITS_PER_WORD;
4749 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4750 intslots = (endbit - startbit) / BITS_PER_WORD;
4751 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4752
4753 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4754 if (intslots <= 0)
4755 return;
4756
4757 /* If this is the trailing part of a word, only load that much into
4758 the register. Otherwise load the whole register. Note that in
4759 the latter case we may pick up unwanted bits. It's not a problem
4760 at the moment, but we may wish to revisit it. */
4761
4762 if (intoffset % BITS_PER_WORD != 0)
4763 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4764 MODE_INT);
4765 else
4766 mode = word_mode;
4767
4768 intoffset /= BITS_PER_UNIT;
4769 do
4770 {
4771 regno = parms->regbase + this_slotno;
4772 reg = gen_rtx_REG (mode, regno);
4773 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4774 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4775
4776 this_slotno += 1;
4777 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4778 mode = word_mode;
4779 parms->nregs += 1;
4780 intslots -= 1;
4781 }
4782 while (intslots > 0);
4783 }
4784
4785 /* A subroutine of function_arg_record_value. Traverse the structure
4786 recursively and assign bits to floating point registers. Track which
4787 bits in between need integer registers; invoke function_arg_record_value_3
4788 to make that happen. */
4789
4790 static void
4791 function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4792 struct function_arg_record_value_parms *parms,
4793 bool packed_p)
4794 {
4795 tree field;
4796
4797 if (! packed_p)
4798 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4799 {
4800 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4801 {
4802 packed_p = true;
4803 break;
4804 }
4805 }
4806
4807 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4808 {
4809 if (TREE_CODE (field) == FIELD_DECL)
4810 {
4811 HOST_WIDE_INT bitpos = startbitpos;
4812
4813 if (DECL_SIZE (field) != 0)
4814 {
4815 if (integer_zerop (DECL_SIZE (field)))
4816 continue;
4817
4818 if (host_integerp (bit_position (field), 1))
4819 bitpos += int_bit_position (field);
4820 }
4821
4822 /* ??? FIXME: else assume zero offset. */
4823
4824 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4825 function_arg_record_value_2 (TREE_TYPE (field),
4826 bitpos,
4827 parms,
4828 packed_p);
4829 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4830 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4831 && TARGET_FPU
4832 && parms->named
4833 && ! packed_p)
4834 {
4835 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4836 int regno, nregs, pos;
4837 enum machine_mode mode = DECL_MODE (field);
4838 rtx reg;
4839
4840 function_arg_record_value_3 (bitpos, parms);
4841
4842 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4843 && mode == BLKmode)
4844 {
4845 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4846 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4847 }
4848 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4849 {
4850 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4851 nregs = 2;
4852 }
4853 else
4854 nregs = 1;
4855
4856 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4857 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4858 regno++;
4859 reg = gen_rtx_REG (mode, regno);
4860 pos = bitpos / BITS_PER_UNIT;
4861 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4862 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4863 parms->nregs += 1;
4864 while (--nregs > 0)
4865 {
4866 regno += GET_MODE_SIZE (mode) / 4;
4867 reg = gen_rtx_REG (mode, regno);
4868 pos += GET_MODE_SIZE (mode);
4869 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4870 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4871 parms->nregs += 1;
4872 }
4873 }
4874 else
4875 {
4876 if (parms->intoffset == -1)
4877 parms->intoffset = bitpos;
4878 }
4879 }
4880 }
4881 }
4882
4883 /* Used by function_arg and function_value to implement the complex
4884 conventions of the 64-bit ABI for passing and returning structures.
4885 Return an expression valid as a return value for the two macros
4886 FUNCTION_ARG and FUNCTION_VALUE.
4887
4888 TYPE is the data type of the argument (as a tree).
4889 This is null for libcalls where that information may
4890 not be available.
4891 MODE is the argument's machine mode.
4892 SLOTNO is the index number of the argument's slot in the parameter array.
4893 NAMED is nonzero if this argument is a named parameter
4894 (otherwise it is an extra parameter matching an ellipsis).
4895 REGBASE is the regno of the base register for the parameter array. */
4896
4897 static rtx
4898 function_arg_record_value (tree type, enum machine_mode mode,
4899 int slotno, int named, int regbase)
4900 {
4901 HOST_WIDE_INT typesize = int_size_in_bytes (type);
4902 struct function_arg_record_value_parms parms;
4903 unsigned int nregs;
4904
4905 parms.ret = NULL_RTX;
4906 parms.slotno = slotno;
4907 parms.named = named;
4908 parms.regbase = regbase;
4909 parms.stack = 0;
4910
4911 /* Compute how many registers we need. */
4912 parms.nregs = 0;
4913 parms.intoffset = 0;
4914 function_arg_record_value_1 (type, 0, &parms, false);
4915
4916 /* Take into account pending integer fields. */
4917 if (parms.intoffset != -1)
4918 {
4919 unsigned int startbit, endbit;
4920 int intslots, this_slotno;
4921
4922 startbit = parms.intoffset & -BITS_PER_WORD;
4923 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4924 intslots = (endbit - startbit) / BITS_PER_WORD;
4925 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4926
4927 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4928 {
4929 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4930 /* We need to pass this field on the stack. */
4931 parms.stack = 1;
4932 }
4933
4934 parms.nregs += intslots;
4935 }
4936 nregs = parms.nregs;
4937
4938 /* Allocate the vector and handle some annoying special cases. */
4939 if (nregs == 0)
4940 {
4941 /* ??? Empty structure has no value? Duh? */
4942 if (typesize <= 0)
4943 {
4944 /* Though there's nothing really to store, return a word register
4945 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
4946 leads to breakage because there are zero bytes to load. */
4948 return gen_rtx_REG (mode, regbase);
4949 }
4950 else
4951 {
4952 /* ??? C++ has structures with no fields, and yet a size. Give up
4953 for now and pass everything back in integer registers. */
4954 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4955 }
4956 if (nregs + slotno > SPARC_INT_ARG_MAX)
4957 nregs = SPARC_INT_ARG_MAX - slotno;
4958 }
4959 gcc_assert (nregs != 0);
4960
4961 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4962
4963 /* If at least one field must be passed on the stack, generate
4964 (parallel [(expr_list (nil) ...) ...]) so that all fields will
4965 also be passed on the stack. We can't do much better because the
4966 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
4967 of structures for which the fields passed exclusively in registers
4968 are not at the beginning of the structure. */
4969 if (parms.stack)
4970 XVECEXP (parms.ret, 0, 0)
4971 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4972
4973 /* Fill in the entries. */
4974 parms.nregs = 0;
4975 parms.intoffset = 0;
4976 function_arg_record_value_2 (type, 0, &parms, false);
4977 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4978
4979 gcc_assert (parms.nregs == nregs);
4980
4981 return parms.ret;
4982 }
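
/* A worked example (illustrative, assuming the argument starts in
   slot 0 of a 64-bit call): for

	struct { double d; int i; }

   the first pass counts one fp register for D and one integer word
   for I, and the second pass fills in roughly

	(parallel [(expr_list (reg:DF %f0) (const_int 0))
		   (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the double travels in a float register and the trailing word
   in the second integer argument slot.  */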
4983
4984 /* Used by function_arg and function_value to implement the conventions
4985 of the 64-bit ABI for passing and returning unions.
4986 Return an expression valid as a return value for the two macros
4987 FUNCTION_ARG and FUNCTION_VALUE.
4988
4989 SIZE is the size in bytes of the union. MODE is the argument's machine mode.
4990 SLOTNO is the index number of the argument's slot in the parameter array.
4991 REGNO is the hard register the union will be passed in. */
4992
4993 static rtx
4994 function_arg_union_value (int size, enum machine_mode mode, int slotno,
4995 int regno)
4996 {
4997 int nwords = ROUND_ADVANCE (size), i;
4998 rtx regs;
4999
5000 /* See comment in previous function for empty structures. */
5001 if (nwords == 0)
5002 return gen_rtx_REG (mode, regno);
5003
5004 if (slotno == SPARC_INT_ARG_MAX - 1)
5005 nwords = 1;
5006
5007 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5008
5009 for (i = 0; i < nwords; i++)
5010 {
5011 /* Unions are passed left-justified. */
5012 XVECEXP (regs, 0, i)
5013 = gen_rtx_EXPR_LIST (VOIDmode,
5014 gen_rtx_REG (word_mode, regno),
5015 GEN_INT (UNITS_PER_WORD * i));
5016 regno++;
5017 }
5018
5019 return regs;
5020 }
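
/* Illustrative example: a 16-byte union starting in slot 0 of a
   64-bit call yields

	(parallel [(expr_list (reg:DI %o0) (const_int 0))
		   (expr_list (reg:DI %o1) (const_int 8))])

   whereas the same union starting in the last slot is trimmed to a
   single word above, the rest going to memory.  */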
5021
5022 /* Used by function_arg and function_value to implement the conventions
5023 for passing and returning large (BLKmode) vectors.
5024 Return an expression valid as a return value for the two macros
5025 FUNCTION_ARG and FUNCTION_VALUE.
5026
5027 SIZE is the size in bytes of the vector.
5028 BASE_MODE is the argument's base machine mode.
5029 REGNO is the FP hard register the vector will be passed in. */
5030
5031 static rtx
5032 function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5033 {
5034 unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5035 int nregs = size / base_mode_size, i;
5036 rtx regs;
5037
5038 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5039
5040 for (i = 0; i < nregs; i++)
5041 {
5042 XVECEXP (regs, 0, i)
5043 = gen_rtx_EXPR_LIST (VOIDmode,
5044 gen_rtx_REG (base_mode, regno),
5045 GEN_INT (base_mode_size * i));
5046 regno += base_mode_size / 4;
5047 }
5048
5049 return regs;
5050 }
5051
5052 /* Handle the FUNCTION_ARG macro.
5053 Determine where to put an argument to a function.
5054 Value is zero to push the argument on the stack,
5055 or a hard register in which to store the argument.
5056
5057 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5058 the preceding args and about the function being called.
5059 MODE is the argument's machine mode.
5060 TYPE is the data type of the argument (as a tree).
5061 This is null for libcalls where that information may
5062 not be available.
5063 NAMED is nonzero if this argument is a named parameter
5064 (otherwise it is an extra parameter matching an ellipsis).
5065 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
5066
5067 rtx
5068 function_arg (const struct sparc_args *cum, enum machine_mode mode,
5069 tree type, int named, int incoming_p)
5070 {
5071 int regbase = (incoming_p
5072 ? SPARC_INCOMING_INT_ARG_FIRST
5073 : SPARC_OUTGOING_INT_ARG_FIRST);
5074 int slotno, regno, padding;
5075 enum mode_class mclass = GET_MODE_CLASS (mode);
5076 rtx reg;
5077
5078 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5079 &regno, &padding);
5080
5081 if (slotno == -1)
5082 return 0;
5083
5084 if (TARGET_ARCH32)
5085 {
5086 reg = gen_rtx_REG (mode, regno);
5087 return reg;
5088 }
5089
5090 if (type && TREE_CODE (type) == RECORD_TYPE)
5091 {
5092 /* Structures up to 16 bytes in size are passed in arg slots on the
5093 stack and are promoted to registers where possible. */
5094
5095 gcc_assert (int_size_in_bytes (type) <= 16);
5096
5097 return function_arg_record_value (type, mode, slotno, named, regbase);
5098 }
5099 else if (type && TREE_CODE (type) == UNION_TYPE)
5100 {
5101 HOST_WIDE_INT size = int_size_in_bytes (type);
5102
5103 gcc_assert (size <= 16);
5104
5105 return function_arg_union_value (size, mode, slotno, regno);
5106 }
5107 else if (type && TREE_CODE (type) == VECTOR_TYPE)
5108 {
5109 /* Vector types deserve special treatment because they are
5110 polymorphic wrt their mode, depending upon whether VIS
5111 instructions are enabled. */
5112 HOST_WIDE_INT size = int_size_in_bytes (type);
5113
5114 gcc_assert (size <= 16);
5115
5116 if (mode == BLKmode)
5117 return function_arg_vector_value (size,
5118 TYPE_MODE (TREE_TYPE (type)),
5119 SPARC_FP_ARG_FIRST + 2*slotno);
5120 else
5121 mclass = MODE_FLOAT;
5122 }
5123
5124 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5125 but also have the slot allocated for them.
5126 If no prototype is in scope, fp values in register slots get passed
5127 in two places, either fp regs and int regs or fp regs and memory. */
5128 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5129 && SPARC_FP_REG_P (regno))
5130 {
5131 reg = gen_rtx_REG (mode, regno);
5132 if (cum->prototype_p || cum->libcall_p)
5133 {
5134 /* "* 2" because fp reg numbers are recorded in 4 byte
5135 quantities. */
5136 #if 0
5137 /* ??? This will cause the value to be passed in the fp reg and
5138 in the stack. When a prototype exists we want to pass the
5139 value in the reg but reserve space on the stack. That's an
5140 optimization, and is deferred [for a bit]. */
5141 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5142 return gen_rtx_PARALLEL (mode,
5143 gen_rtvec (2,
5144 gen_rtx_EXPR_LIST (VOIDmode,
5145 NULL_RTX, const0_rtx),
5146 gen_rtx_EXPR_LIST (VOIDmode,
5147 reg, const0_rtx)));
5148 else
5149 #else
5150 /* ??? It seems that passing back a register even when past
5151 the area declared by REG_PARM_STACK_SPACE will allocate
5152 space appropriately, and will not copy the data onto the
5153 stack, exactly as we desire.
5154
5155 This is due to locate_and_pad_parm being called in
5156 expand_call whenever reg_parm_stack_space > 0, which
5157 while beneficial to our example here, would seem to be
5158 in error from what had been intended. Ho hum... -- r~ */
5159 #endif
5160 return reg;
5161 }
5162 else
5163 {
5164 rtx v0, v1;
5165
5166 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5167 {
5168 int intreg;
5169
5170 /* On incoming, we don't need to know that the value
5171 is passed in %f0 and %i0, and it confuses other parts
5172 causing needless spillage even on the simplest cases. */
5173 if (incoming_p)
5174 return reg;
5175
5176 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5177 + (regno - SPARC_FP_ARG_FIRST) / 2);
5178
5179 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5180 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5181 const0_rtx);
5182 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5183 }
5184 else
5185 {
5186 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5187 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5188 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5189 }
5190 }
5191 }
5192 else
5193 {
5194 /* Scalar or complex int. */
5195 reg = gen_rtx_REG (mode, regno);
5196 }
5197
5198 return reg;
5199 }
5200
5201 /* For an arg passed partly in registers and partly in memory,
5202 this is the number of bytes of registers used.
5203 For args passed entirely in registers or entirely in memory, zero.
5204
5205 Any arg that starts in the first 6 regs but won't entirely fit in them
5206 needs partial registers on v8. On v9, structures with integer
5207 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5208 values that begin in the last fp reg [where "last fp reg" varies with the
5209 mode] will be split between that reg and memory. */
5210
5211 static int
5212 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5213 tree type, bool named)
5214 {
5215 int slotno, regno, padding;
5216
5217 /* We pass 0 for incoming_p here; it doesn't matter. */
5218 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5219
5220 if (slotno == -1)
5221 return 0;
5222
5223 if (TARGET_ARCH32)
5224 {
5225 if ((slotno + (mode == BLKmode
5226 ? ROUND_ADVANCE (int_size_in_bytes (type))
5227 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5228 > SPARC_INT_ARG_MAX)
5229 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5230 }
5231 else
5232 {
5233 /* We are guaranteed by pass_by_reference that the size of the
5234 argument is not greater than 16 bytes, so we only need to return
5235 one word if the argument is partially passed in registers. */
5236
5237 if (type && AGGREGATE_TYPE_P (type))
5238 {
5239 int size = int_size_in_bytes (type);
5240
5241 if (size > UNITS_PER_WORD
5242 && slotno == SPARC_INT_ARG_MAX - 1)
5243 return UNITS_PER_WORD;
5244 }
5245 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5246 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5247 && ! (TARGET_FPU && named)))
5248 {
5249 /* The complex types are passed as packed types. */
5250 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5251 && slotno == SPARC_INT_ARG_MAX - 1)
5252 return UNITS_PER_WORD;
5253 }
5254 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5255 {
5256 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5257 > SPARC_FP_ARG_MAX)
5258 return UNITS_PER_WORD;
5259 }
5260 }
5261
5262 return 0;
5263 }
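
/* Example for the 32-bit branch above: a double starting in slot 5
   needs two words but only one register slot remains, so the hook
   returns (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD = 4 bytes in %o5;
   the remaining half of the value goes on the stack.  */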
5264
5265 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5266 Specify whether to pass the argument by reference. */
5267
5268 static bool
5269 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5270 enum machine_mode mode, tree type,
5271 bool named ATTRIBUTE_UNUSED)
5272 {
5273 if (TARGET_ARCH32)
5274 {
5275 /* Original SPARC 32-bit ABI says that structures and unions,
5276 and quad-precision floats are passed by reference. For Pascal,
5277 also pass arrays by reference. All other base types are passed
5278 in registers.
5279
5280 Extended ABI (as implemented by the Sun compiler) says that all
5281 complex floats are passed by reference. Pass complex integers
5282 in registers up to 8 bytes. More generally, enforce the 2-word
5283 cap for passing arguments in registers.
5284
5285 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5286 integers are passed like floats of the same size, that is in
5287 registers up to 8 bytes. Pass all vector floats by reference
5288 like structure and unions. */
5289 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5290 || mode == SCmode
5291 /* Catch CDImode, TFmode, DCmode and TCmode. */
5292 || GET_MODE_SIZE (mode) > 8
5293 || (type
5294 && TREE_CODE (type) == VECTOR_TYPE
5295 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5296 }
5297 else
5298 {
5299 /* Original SPARC 64-bit ABI says that structures and unions
5300 smaller than 16 bytes are passed in registers, as well as
5301 all other base types. For Pascal, pass arrays by reference.
5302
5303 Extended ABI (as implemented by the Sun compiler) says that
5304 complex floats are passed in registers up to 16 bytes. Pass
5305 all complex integers in registers up to 16 bytes. More generally,
5306 enforce the 2-word cap for passing arguments in registers.
5307
5308 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5309 integers are passed like floats of the same size, that is in
5310 registers (up to 16 bytes). Pass all vector floats like structure
5311 and unions. */
5312 return ((type && TREE_CODE (type) == ARRAY_TYPE)
5313 || (type
5314 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5315 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5316 /* Catch CTImode and TCmode. */
5317 || GET_MODE_SIZE (mode) > 16);
5318 }
5319 }
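
/* A few illustrative data points for the rules above:

	struct { int a, b; }	32-bit: by reference	64-bit: registers
	long double (16 bytes)	32-bit: by reference	64-bit: fp registers
	_Complex double		32-bit: by reference	64-bit: registers  */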
5320
5321 /* Handle the FUNCTION_ARG_ADVANCE macro.
5322 Update the data in CUM to advance over an argument
5323 of mode MODE and data type TYPE.
5324 TYPE is null for libcalls where that information may not be available. */
5325
5326 void
5327 function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5328 tree type, int named)
5329 {
5330 int slotno, regno, padding;
5331
5332 /* We pass 0 for incoming_p here; it doesn't matter. */
5333 slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5334
5335 /* If the register required leading padding, add it. */
5336 if (slotno != -1)
5337 cum->words += padding;
5338
5339 if (TARGET_ARCH32)
5340 {
5341 cum->words += (mode != BLKmode
5342 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5343 : ROUND_ADVANCE (int_size_in_bytes (type)));
5344 }
5345 else
5346 {
5347 if (type && AGGREGATE_TYPE_P (type))
5348 {
5349 int size = int_size_in_bytes (type);
5350
5351 if (size <= 8)
5352 ++cum->words;
5353 else if (size <= 16)
5354 cum->words += 2;
5355 else /* passed by reference */
5356 ++cum->words;
5357 }
5358 else
5359 {
5360 cum->words += (mode != BLKmode
5361 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5362 : ROUND_ADVANCE (int_size_in_bytes (type)));
5363 }
5364 }
5365 }
5366
5367 /* Handle the FUNCTION_ARG_PADDING macro.
5368 For the 64-bit ABI, structs are always stored left-shifted in their
5369 argument slot.
5370
5371 enum direction
5372 function_arg_padding (enum machine_mode mode, tree type)
5373 {
5374 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5375 return upward;
5376
5377 /* Fall back to the default. */
5378 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5379 }
5380
5381 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5382 Specify whether to return the return value in memory. */
5383
5384 static bool
5385 sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5386 {
5387 if (TARGET_ARCH32)
5388 /* Original SPARC 32-bit ABI says that structures and unions,
5389 and quad-precision floats are returned in memory. All other
5390 base types are returned in registers.
5391
5392 Extended ABI (as implemented by the Sun compiler) says that
5393 all complex floats are returned in registers (8 FP registers
5394 at most for '_Complex long double'). Return all complex integers
5395 in registers (4 at most for '_Complex long long').
5396
5397 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5398 integers are returned like floats of the same size, that is in
5399 registers up to 8 bytes and in memory otherwise. Return all
5400 vector floats in memory like structure and unions; note that
5401 they always have BLKmode like the latter. */
5402 return (TYPE_MODE (type) == BLKmode
5403 || TYPE_MODE (type) == TFmode
5404 || (TREE_CODE (type) == VECTOR_TYPE
5405 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5406 else
5407 /* Original SPARC 64-bit ABI says that structures and unions
5408 smaller than 32 bytes are returned in registers, as well as
5409 all other base types.
5410
5411 Extended ABI (as implemented by the Sun compiler) says that all
5412 complex floats are returned in registers (8 FP registers at most
5413 for '_Complex long double'). Return all complex integers in
5414 registers (4 at most for '_Complex TItype').
5415
5416 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5417 integers are returned like floats of the same size, that is in
5418 registers. Return all vector floats like structure and unions;
5419 note that they always have BLKmode like the latter. */
5420 return ((TYPE_MODE (type) == BLKmode
5421 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5422 }
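
/* E.g. long double (TFmode) is returned in memory by the 32-bit ABI
   but in fp registers by the 64-bit ABI, whose memory cut-off only
   applies to BLKmode values larger than 32 bytes.  */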
5423
5424 /* Handle the TARGET_STRUCT_VALUE target hook.
5425 Return where to find the structure return value address. */
5426
5427 static rtx
5428 sparc_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED, int incoming)
5429 {
5430 if (TARGET_ARCH64)
5431 return 0;
5432 else
5433 {
5434 rtx mem;
5435
5436 if (incoming)
5437 mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5438 STRUCT_VALUE_OFFSET));
5439 else
5440 mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5441 STRUCT_VALUE_OFFSET));
5442
5443 set_mem_alias_set (mem, struct_value_alias_set);
5444 return mem;
5445 }
5446 }
5447
5448 /* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5449 For v9, function return values are subject to the same rules as arguments,
5450 except that up to 32 bytes may be returned in registers. */
5451
5452 rtx
5453 function_value (tree type, enum machine_mode mode, int incoming_p)
5454 {
5455 /* Beware that the two values are swapped here wrt function_arg. */
5456 int regbase = (incoming_p
5457 ? SPARC_OUTGOING_INT_ARG_FIRST
5458 : SPARC_INCOMING_INT_ARG_FIRST);
5459 enum mode_class mclass = GET_MODE_CLASS (mode);
5460 int regno;
5461
5462 if (type && TREE_CODE (type) == VECTOR_TYPE)
5463 {
5464 /* Vector types deserve special treatment because they are
5465 polymorphic wrt their mode, depending upon whether VIS
5466 instructions are enabled. */
5467 HOST_WIDE_INT size = int_size_in_bytes (type);
5468
5469 gcc_assert ((TARGET_ARCH32 && size <= 8)
5470 || (TARGET_ARCH64 && size <= 32));
5471
5472 if (mode == BLKmode)
5473 return function_arg_vector_value (size,
5474 TYPE_MODE (TREE_TYPE (type)),
5475 SPARC_FP_ARG_FIRST);
5476 else
5477 mclass = MODE_FLOAT;
5478 }
5479 else if (type && TARGET_ARCH64)
5480 {
5481 if (TREE_CODE (type) == RECORD_TYPE)
5482 {
5483 /* Structures up to 32 bytes in size are passed in registers,
5484 promoted to fp registers where possible. */
5485
5486 gcc_assert (int_size_in_bytes (type) <= 32);
5487
5488 return function_arg_record_value (type, mode, 0, 1, regbase);
5489 }
5490 else if (TREE_CODE (type) == UNION_TYPE)
5491 {
5492 HOST_WIDE_INT size = int_size_in_bytes (type);
5493
5494 gcc_assert (size <= 32);
5495
5496 return function_arg_union_value (size, mode, 0, regbase);
5497 }
5498 else if (AGGREGATE_TYPE_P (type))
5499 {
5500 /* All other aggregate types are passed in an integer register
5501 in a mode corresponding to the size of the type. */
5502 HOST_WIDE_INT bytes = int_size_in_bytes (type);
5503
5504 gcc_assert (bytes <= 32);
5505
5506 mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
5507
5508 /* ??? We probably should have made the same ABI change in
5509 3.4.0 as the one we made for unions. The latter was
5510 required by the SCD though, while the former is not
5511 specified, so we favored compatibility and efficiency.
5512
5513 Now we're stuck for aggregates larger than 16 bytes,
5514 because OImode vanished in the meantime. Let's not
5515 try to be unduly clever, and simply follow the ABI
5516 for unions in that case. */
5517 if (mode == BLKmode)
5518 return function_arg_union_value (bytes, mode, 0, regbase);
5519 else
5520 mclass = MODE_INT;
5521 }
5522 else if (mclass == MODE_INT
5523 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5524 mode = word_mode;
5525 }
5526
5527 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5528 && TARGET_FPU)
5529 regno = SPARC_FP_ARG_FIRST;
5530 else
5531 regno = regbase;
5532
5533 return gen_rtx_REG (mode, regno);
5534 }
5535
5536 /* Do what is necessary for `va_start'. We look at the current function
5537 to determine if stdarg or varargs is used and return the address of
5538 the first unnamed parameter. */
5539
5540 static rtx
5541 sparc_builtin_saveregs (void)
5542 {
5543 int first_reg = current_function_args_info.words;
5544 rtx address;
5545 int regno;
5546
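  /* The loop below dumps each as-yet-unnamed incoming integer argument
     register into its reserved slot in the parameter save area; for
     instance, on 32-bit targets where FIRST_PARM_OFFSET (0) is 68,
     incoming register I lands at %fp + 68 + 4 * I.  The address
     returned afterwards points at the first unnamed slot, which is
     where va_list starts scanning.  */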
5547 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5548 emit_move_insn (gen_rtx_MEM (word_mode,
5549 gen_rtx_PLUS (Pmode,
5550 frame_pointer_rtx,
5551 GEN_INT (FIRST_PARM_OFFSET (0)
5552 + (UNITS_PER_WORD
5553 * regno)))),
5554 gen_rtx_REG (word_mode,
5555 SPARC_INCOMING_INT_ARG_FIRST + regno));
5556
5557 address = gen_rtx_PLUS (Pmode,
5558 frame_pointer_rtx,
5559 GEN_INT (FIRST_PARM_OFFSET (0)
5560 + UNITS_PER_WORD * first_reg));
5561
5562 return address;
5563 }
5564
5565 /* Implement `va_start' for stdarg. */
5566
5567 void
5568 sparc_va_start (tree valist, rtx nextarg)
5569 {
5570 nextarg = expand_builtin_saveregs ();
5571 std_expand_builtin_va_start (valist, nextarg);
5572 }
5573
5574 /* Implement `va_arg' for stdarg. */
5575
5576 static tree
5577 sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5578 {
5579 HOST_WIDE_INT size, rsize, align;
5580 tree addr, incr;
5581 bool indirect;
5582 tree ptrtype = build_pointer_type (type);
5583
5584 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5585 {
5586 indirect = true;
5587 size = rsize = UNITS_PER_WORD;
5588 align = 0;
5589 }
5590 else
5591 {
5592 indirect = false;
5593 size = int_size_in_bytes (type);
5594 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
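      /* Round the size up to a whole number of words; e.g. a 5-byte
         object gets an rsize of 8 with 4-byte words.  */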
5595 align = 0;
5596
5597 if (TARGET_ARCH64)
5598 {
5599 /* For SPARC64, objects requiring 16-byte alignment get it. */
5600 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5601 align = 2 * UNITS_PER_WORD;
5602
5603 /* SPARC-V9 ABI states that structures up to 16 bytes in size
5604 are left-justified in their slots. */
5605 if (AGGREGATE_TYPE_P (type))
5606 {
5607 if (size == 0)
5608 size = rsize = UNITS_PER_WORD;
5609 else
5610 size = rsize;
5611 }
5612 }
5613 }
5614
5615 incr = valist;
5616 if (align)
5617 {
5618 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5619 ssize_int (align - 1)));
5620 incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5621 ssize_int (-align)));
5622 }
5623
5624 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5625 addr = incr;
5626
5627 if (BYTES_BIG_ENDIAN && size < rsize)
5628 addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5629 ssize_int (rsize - size)));
5630
5631 if (indirect)
5632 {
5633 addr = fold_convert (build_pointer_type (ptrtype), addr);
5634 addr = build_va_arg_indirect_ref (addr);
5635 }
5636 /* If the address isn't aligned properly for the type,
5637 we may need to copy to a temporary.
5638 FIXME: This is inefficient. Usually we can do this
5639 in registers. */
5640 else if (align == 0
5641 && TYPE_ALIGN (type) > BITS_PER_WORD)
5642 {
5643 tree tmp = create_tmp_var (type, "va_arg_tmp");
5644 tree dest_addr = build_fold_addr_expr (tmp);
5645
5646 tree copy = build_function_call_expr
5647 (implicit_built_in_decls[BUILT_IN_MEMCPY],
5648 tree_cons (NULL_TREE, dest_addr,
5649 tree_cons (NULL_TREE, addr,
5650 tree_cons (NULL_TREE, size_int (rsize),
5651 NULL_TREE))));
5652
5653 gimplify_and_add (copy, pre_p);
5654 addr = dest_addr;
5655 }
5656 else
5657 addr = fold_convert (ptrtype, addr);
5658
5659 incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5660 incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5661 gimplify_and_add (incr, post_p);
5662
5663 return build_va_arg_indirect_ref (addr);
5664 }
5665 \f
5666 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5667 Specify whether the vector mode is supported by the hardware. */
5668
5669 static bool
5670 sparc_vector_mode_supported_p (enum machine_mode mode)
5671 {
5672 return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5673 }
5674 \f
5675 /* Return the string to output an unconditional branch to LABEL, which is
5676 the operand number of the label.
5677
5678 DEST is the destination insn (i.e. the label), INSN is the source. */
5679
5680 const char *
5681 output_ubranch (rtx dest, int label, rtx insn)
5682 {
5683 static char string[64];
5684 bool v9_form = false;
5685 char *p;
5686
5687 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5688 {
5689 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5690 - INSN_ADDRESSES (INSN_UID (insn)));
5691 /* Leave some instructions for "slop". */
5692 if (delta >= -260000 && delta < 260000)
5693 v9_form = true;
5694 }
5695
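  /* With operand 0 as the label, the V9 form below expands to
     "ba%*,pt %xcc, %l0%(" and the V7/V8 form to "b%* %l0%("; the %*
     and %( escapes (see print_operand) emit the annul flag and the
     delay-slot nop respectively.  */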
5696 if (v9_form)
5697 strcpy (string, "ba%*,pt\t%%xcc, ");
5698 else
5699 strcpy (string, "b%*\t");
5700
5701 p = strchr (string, '\0');
5702 *p++ = '%';
5703 *p++ = 'l';
5704 *p++ = '0' + label;
5705 *p++ = '%';
5706 *p++ = '(';
5707 *p = '\0';
5708
5709 return string;
5710 }
5711
5712 /* Return the string to output a conditional branch to LABEL, which is
5713 the operand number of the label. OP is the conditional expression.
5714 XEXP (OP, 0) is assumed to be a condition code register (integer or
5715 floating point) and its mode specifies what kind of comparison we made.
5716
5717 DEST is the destination insn (i.e. the label), INSN is the source.
5718
5719 REVERSED is nonzero if we should reverse the sense of the comparison.
5720
5721 ANNUL is nonzero if we should generate an annulling branch. */
5722
5723 const char *
5724 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5725 rtx insn)
5726 {
5727 static char string[64];
5728 enum rtx_code code = GET_CODE (op);
5729 rtx cc_reg = XEXP (op, 0);
5730 enum machine_mode mode = GET_MODE (cc_reg);
5731 const char *labelno, *branch;
5732 int spaces = 8, far;
5733 char *p;
5734
5735 /* V9 branches are limited to +-1MB. If the target is too far away,
5736 change
5737
5738 bne,pt %xcc, .LC30
5739
5740 to
5741
5742 be,pn %xcc, .+12
5743 nop
5744 ba .LC30
5745
5746 and
5747
5748 fbne,a,pn %fcc2, .LC29
5749
5750 to
5751
5752 fbe,pt %fcc2, .+16
5753 nop
5754 ba .LC29 */
5755
5756 far = TARGET_V9 && (get_attr_length (insn) >= 3);
5757 if (reversed ^ far)
5758 {
5759 /* Reversal of FP compares requires care -- an ordered compare
5760 becomes an unordered compare and vice versa. */
5761 if (mode == CCFPmode || mode == CCFPEmode)
5762 code = reverse_condition_maybe_unordered (code);
5763 else
5764 code = reverse_condition (code);
5765 }
5766
5767 /* Start by writing the branch condition. */
5768 if (mode == CCFPmode || mode == CCFPEmode)
5769 {
5770 switch (code)
5771 {
5772 case NE:
5773 branch = "fbne";
5774 break;
5775 case EQ:
5776 branch = "fbe";
5777 break;
5778 case GE:
5779 branch = "fbge";
5780 break;
5781 case GT:
5782 branch = "fbg";
5783 break;
5784 case LE:
5785 branch = "fble";
5786 break;
5787 case LT:
5788 branch = "fbl";
5789 break;
5790 case UNORDERED:
5791 branch = "fbu";
5792 break;
5793 case ORDERED:
5794 branch = "fbo";
5795 break;
5796 case UNGT:
5797 branch = "fbug";
5798 break;
5799 case UNLT:
5800 branch = "fbul";
5801 break;
5802 case UNEQ:
5803 branch = "fbue";
5804 break;
5805 case UNGE:
5806 branch = "fbuge";
5807 break;
5808 case UNLE:
5809 branch = "fbule";
5810 break;
5811 case LTGT:
5812 branch = "fblg";
5813 break;
5814
5815 default:
5816 gcc_unreachable ();
5817 }
5818
5819 /* ??? !v9: FP branches cannot be preceded by another floating point
5820 insn. Because there is currently no concept of pre-delay slots,
5821 we can fix this only by always emitting a nop before a floating
5822 point branch. */
5823
5824 string[0] = '\0';
5825 if (! TARGET_V9)
5826 strcpy (string, "nop\n\t");
5827 strcat (string, branch);
5828 }
5829 else
5830 {
5831 switch (code)
5832 {
5833 case NE:
5834 branch = "bne";
5835 break;
5836 case EQ:
5837 branch = "be";
5838 break;
5839 case GE:
5840 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5841 branch = "bpos";
5842 else
5843 branch = "bge";
5844 break;
5845 case GT:
5846 branch = "bg";
5847 break;
5848 case LE:
5849 branch = "ble";
5850 break;
5851 case LT:
5852 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5853 branch = "bneg";
5854 else
5855 branch = "bl";
5856 break;
5857 case GEU:
5858 branch = "bgeu";
5859 break;
5860 case GTU:
5861 branch = "bgu";
5862 break;
5863 case LEU:
5864 branch = "bleu";
5865 break;
5866 case LTU:
5867 branch = "blu";
5868 break;
5869
5870 default:
5871 gcc_unreachable ();
5872 }
5873 strcpy (string, branch);
5874 }
5875 spaces -= strlen (branch);
5876 p = strchr (string, '\0');
5877
5878 /* Now add the annulling, the label, and a possible noop. */
5879 if (annul && ! far)
5880 {
5881 strcpy (p, ",a");
5882 p += 2;
5883 spaces -= 2;
5884 }
5885
5886 if (TARGET_V9)
5887 {
5888 rtx note;
5889 int v8 = 0;
5890
5891 if (! far && insn && INSN_ADDRESSES_SET_P ())
5892 {
5893 int delta = (INSN_ADDRESSES (INSN_UID (dest))
5894 - INSN_ADDRESSES (INSN_UID (insn)));
5895 /* Leave some instructions for "slop". */
5896 if (delta < -260000 || delta >= 260000)
5897 v8 = 1;
5898 }
5899
5900 if (mode == CCFPmode || mode == CCFPEmode)
5901 {
5902 static char v9_fcc_labelno[] = "%%fccX, ";
5903 /* Set the char indicating the number of the fcc reg to use. */
5904 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5905 labelno = v9_fcc_labelno;
5906 if (v8)
5907 {
5908 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5909 labelno = "";
5910 }
5911 }
5912 else if (mode == CCXmode || mode == CCX_NOOVmode)
5913 {
5914 labelno = "%%xcc, ";
5915 gcc_assert (! v8);
5916 }
5917 else
5918 {
5919 labelno = "%%icc, ";
5920 if (v8)
5921 labelno = "";
5922 }
5923
5924 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5925 {
5926 strcpy (p,
5927 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5928 ? ",pt" : ",pn");
5929 p += 3;
5930 spaces -= 3;
5931 }
5932 }
5933 else
5934 labelno = "";
5935
5936 if (spaces > 0)
5937 *p++ = '\t';
5938 else
5939 *p++ = ' ';
5940 strcpy (p, labelno);
5941 p = strchr (p, '\0');
5942 if (far)
5943 {
5944 strcpy (p, ".+12\n\t nop\n\tb\t");
5945 /* Skip the next insn if requested or
5946 if we know that it will be a nop. */
5947 if (annul || ! final_sequence)
5948 p[3] = '6';
5949 p += 14;
5950 }
5951 *p++ = '%';
5952 *p++ = 'l';
5953 *p++ = label + '0';
5954 *p++ = '%';
5955 *p++ = '#';
5956 *p = '\0';
5957
5958 return string;
5959 }
5960
5961 /* Emit a library call comparison between floating point X and Y.
5962 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
5963 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
5964 values as arguments instead of the TFmode registers themselves;
5965 that's why we cannot call emit_float_lib_cmp. */
5966 void
5967 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
5968 {
5969 const char *qpfunc;
5970 rtx slot0, slot1, result, tem, tem2;
5971 enum machine_mode mode;
5972
5973 switch (comparison)
5974 {
5975 case EQ:
5976 qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
5977 break;
5978
5979 case NE:
5980 qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
5981 break;
5982
5983 case GT:
5984 qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
5985 break;
5986
5987 case GE:
5988 qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
5989 break;
5990
5991 case LT:
5992 qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
5993 break;
5994
5995 case LE:
5996 qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
5997 break;
5998
5999 case ORDERED:
6000 case UNORDERED:
6001 case UNGT:
6002 case UNLT:
6003 case UNEQ:
6004 case UNGE:
6005 case UNLE:
6006 case LTGT:
6007 qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6008 break;
6009
6010 default:
6011 gcc_unreachable ();
6012 }
6013
6014 if (TARGET_ARCH64)
6015 {
6016 if (GET_CODE (x) != MEM)
6017 {
6018 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6019 emit_move_insn (slot0, x);
6020 }
6021 else
6022 slot0 = x;
6023
6024 if (GET_CODE (y) != MEM)
6025 {
6026 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6027 emit_move_insn (slot1, y);
6028 }
6029 else
6030 slot1 = y;
6031
6032 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6033 DImode, 2,
6034 XEXP (slot0, 0), Pmode,
6035 XEXP (slot1, 0), Pmode);
6036
6037 mode = DImode;
6038 }
6039 else
6040 {
6041 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6042 SImode, 2,
6043 x, TFmode, y, TFmode);
6044
6045 mode = SImode;
6046 }
6047
6048
6049 /* Immediately move the result of the libcall into a pseudo
6050 register so reload doesn't clobber the value if it needs
6051 the return register for a spill reg. */
6052 result = gen_reg_rtx (mode);
6053 emit_move_insn (result, hard_libcall_value (mode));
6054
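  /* The boolean routines (_Q_feq and friends) are simply tested
     against zero below (the default case).  _Q_cmp/_Qp_cmp encodes its
     result as 0 (equal), 1 (less), 2 (greater) or 3 (unordered), which
     the remaining cases decode: UNORDERED/ORDERED compare against 3,
     UNGT tests result > 1 (greater or unordered), UNGE result != 1,
     UNLE result != 2, UNLT the low bit (less or unordered), while
     (result + 1) & 2 is zero exactly for results 0 and 3 (UNEQ) and
     nonzero exactly for 1 and 2 (LTGT).  */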
6055 switch (comparison)
6056 {
6057 default:
6058 emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6059 break;
6060 case ORDERED:
6061 case UNORDERED:
6062 emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6063 NULL_RTX, mode, 0);
6064 break;
6065 case UNGT:
6066 case UNGE:
6067 emit_cmp_insn (result, const1_rtx,
6068 comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6069 break;
6070 case UNLE:
6071 emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6072 break;
6073 case UNLT:
6074 tem = gen_reg_rtx (mode);
6075 if (TARGET_ARCH32)
6076 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6077 else
6078 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6079 emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6080 break;
6081 case UNEQ:
6082 case LTGT:
6083 tem = gen_reg_rtx (mode);
6084 if (TARGET_ARCH32)
6085 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6086 else
6087 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6088 tem2 = gen_reg_rtx (mode);
6089 if (TARGET_ARCH32)
6090 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6091 else
6092 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6093 emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6094 NULL_RTX, mode, 0);
6095 break;
6096 }
6097 }
6098
6099 /* Generate an unsigned DImode to FP conversion. This is the same code
6100 optabs would emit if we didn't have TFmode patterns. */
6101
6102 void
6103 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6104 {
6105 rtx neglab, donelab, i0, i1, f0, in, out;
6106
6107 out = operands[0];
6108 in = force_reg (DImode, operands[1]);
6109 neglab = gen_label_rtx ();
6110 donelab = gen_label_rtx ();
6111 i0 = gen_reg_rtx (DImode);
6112 i1 = gen_reg_rtx (DImode);
6113 f0 = gen_reg_rtx (mode);
6114
6115 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6116
6117 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6118 emit_jump_insn (gen_jump (donelab));
6119 emit_barrier ();
6120
6121 emit_label (neglab);
6122
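  /* The high bit is set, so IN is out of signed DImode range.  Halve
     it, ORing the shifted-out bit back in (round to odd, which avoids
     any double-rounding error), convert, then double the result.
     E.g. for 2^64 - 1: i0 = 2^63 - 1, f0 rounds to 2^63, and f0 + f0
     gives 2^64, the correctly rounded value.  */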
6123 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6124 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6125 emit_insn (gen_iordi3 (i0, i0, i1));
6126 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6127 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6128
6129 emit_label (donelab);
6130 }
6131
6132 /* Generate an FP to unsigned DImode conversion. This is the same code
6133 optabs would emit if we didn't have TFmode patterns. */
6134
6135 void
6136 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6137 {
6138 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6139
6140 out = operands[0];
6141 in = force_reg (mode, operands[1]);
6142 neglab = gen_label_rtx ();
6143 donelab = gen_label_rtx ();
6144 i0 = gen_reg_rtx (DImode);
6145 i1 = gen_reg_rtx (DImode);
6146 limit = gen_reg_rtx (mode);
6147 f0 = gen_reg_rtx (mode);
6148
6149 emit_move_insn (limit,
6150 CONST_DOUBLE_FROM_REAL_VALUE (
6151 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6152 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6153
6154 emit_insn (gen_rtx_SET (VOIDmode,
6155 out,
6156 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6157 emit_jump_insn (gen_jump (donelab));
6158 emit_barrier ();
6159
6160 emit_label (neglab);
6161
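  /* IN is >= 2^63 and thus out of signed DImode range.  Subtract 2^63,
     convert the remainder with a signed fix, then add 2^63 back by
     XORing the sign bit into the integer result; e.g. for
     in = 2^63 + 5: f0 = 5.0, i0 = 5, and 5 ^ (1 << 63) = 2^63 + 5.  */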
6162 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6163 emit_insn (gen_rtx_SET (VOIDmode,
6164 i0,
6165 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6166 emit_insn (gen_movdi (i1, const1_rtx));
6167 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6168 emit_insn (gen_xordi3 (out, i0, i1));
6169
6170 emit_label (donelab);
6171 }
6172
6173 /* Return the string to output a conditional branch to LABEL, testing
6174 register REG. LABEL is the operand number of the label; REG is the
6175 operand number of the reg. OP is the conditional expression. The mode
6176 of REG says what kind of comparison we made.
6177
6178 DEST is the destination insn (i.e. the label), INSN is the source.
6179
6180 REVERSED is nonzero if we should reverse the sense of the comparison.
6181
6182 ANNUL is nonzero if we should generate an annulling branch. */
6183
6184 const char *
6185 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6186 int annul, rtx insn)
6187 {
6188 static char string[64];
6189 enum rtx_code code = GET_CODE (op);
6190 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6191 rtx note;
6192 int far;
6193 char *p;
6194
6195 /* Branch-on-register instructions are limited to +-128KB. If the target is too far away,
6196 change
6197
6198 brnz,pt %g1, .LC30
6199
6200 to
6201
6202 brz,pn %g1, .+12
6203 nop
6204 ba,pt %xcc, .LC30
6205
6206 and
6207
6208 brgez,a,pn %o1, .LC29
6209
6210 to
6211
6212 brlz,pt %o1, .+16
6213 nop
6214 ba,pt %xcc, .LC29 */
6215
6216 far = get_attr_length (insn) >= 3;
6217
6218 /* If not floating-point or if EQ or NE, we can just reverse the code. */
6219 if (reversed ^ far)
6220 code = reverse_condition (code);
6221
6222 /* Only 64 bit versions of these instructions exist. */
6223 gcc_assert (mode == DImode);
6224
6225 /* Start by writing the branch condition. */
6226
6227 switch (code)
6228 {
6229 case NE:
6230 strcpy (string, "brnz");
6231 break;
6232
6233 case EQ:
6234 strcpy (string, "brz");
6235 break;
6236
6237 case GE:
6238 strcpy (string, "brgez");
6239 break;
6240
6241 case LT:
6242 strcpy (string, "brlz");
6243 break;
6244
6245 case LE:
6246 strcpy (string, "brlez");
6247 break;
6248
6249 case GT:
6250 strcpy (string, "brgz");
6251 break;
6252
6253 default:
6254 gcc_unreachable ();
6255 }
6256
6257 p = strchr (string, '\0');
6258
6259 /* Now add the annulling, reg, label, and nop. */
6260 if (annul && ! far)
6261 {
6262 strcpy (p, ",a");
6263 p += 2;
6264 }
6265
6266 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6267 {
6268 strcpy (p,
6269 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6270 ? ",pt" : ",pn");
6271 p += 3;
6272 }
6273
6274 *p = p < string + 8 ? '\t' : ' ';
6275 p++;
6276 *p++ = '%';
6277 *p++ = '0' + reg;
6278 *p++ = ',';
6279 *p++ = ' ';
6280 if (far)
6281 {
6282 int veryfar = 1, delta;
6283
6284 if (INSN_ADDRESSES_SET_P ())
6285 {
6286 delta = (INSN_ADDRESSES (INSN_UID (dest))
6287 - INSN_ADDRESSES (INSN_UID (insn)));
6288 /* Leave some instructions for "slop". */
6289 if (delta >= -260000 && delta < 260000)
6290 veryfar = 0;
6291 }
6292
6293 strcpy (p, ".+12\n\t nop\n\t");
6294 /* Skip the next insn if requested or
6295 if we know that it will be a nop. */
6296 if (annul || ! final_sequence)
6297 p[3] = '6';
6298 p += 12;
6299 if (veryfar)
6300 {
6301 strcpy (p, "b\t");
6302 p += 2;
6303 }
6304 else
6305 {
6306 strcpy (p, "ba,pt\t%%xcc, ");
6307 p += 13;
6308 }
6309 }
6310 *p++ = '%';
6311 *p++ = 'l';
6312 *p++ = '0' + label;
6313 *p++ = '%';
6314 *p++ = '#';
6315 *p = '\0';
6316
6317 return string;
6318 }
6319
6320 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
6321 Such instructions cannot be used in the delay slot of return insn on v9.
6322 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
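   On V9 the return insn restores the register window before its delay
   slot executes, so a callee %i register must be accessed there as the
   corresponding %o register, while the callee's %l and %o registers
   have no counterpart left at all -- hence both the renaming and the
   rejection.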
6323 */
6324
6325 static int
6326 epilogue_renumber (register rtx *where, int test)
6327 {
6328 register const char *fmt;
6329 register int i;
6330 register enum rtx_code code;
6331
6332 if (*where == 0)
6333 return 0;
6334
6335 code = GET_CODE (*where);
6336
6337 switch (code)
6338 {
6339 case REG:
6340 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
6341 return 1;
6342 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
6343 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
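      /* Fall through.  */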
6344 case SCRATCH:
6345 case CC0:
6346 case PC:
6347 case CONST_INT:
6348 case CONST_DOUBLE:
6349 return 0;
6350
6351 /* Do not replace the frame pointer with the stack pointer because
6352 it can cause the delayed instruction to load below the stack.
6353 This occurs when instructions like:
6354
6355 (set (reg/i:SI 24 %i0)
6356 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
6357 (const_int -20 [0xffffffec])) 0))
6358
6359 are in the return delayed slot. */
6360 case PLUS:
6361 if (GET_CODE (XEXP (*where, 0)) == REG
6362 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
6363 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
6364 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
6365 return 1;
6366 break;
6367
6368 case MEM:
6369 if (SPARC_STACK_BIAS
6370 && GET_CODE (XEXP (*where, 0)) == REG
6371 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
6372 return 1;
6373 break;
6374
6375 default:
6376 break;
6377 }
6378
6379 fmt = GET_RTX_FORMAT (code);
6380
6381 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6382 {
6383 if (fmt[i] == 'E')
6384 {
6385 register int j;
6386 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
6387 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
6388 return 1;
6389 }
6390 else if (fmt[i] == 'e'
6391 && epilogue_renumber (&(XEXP (*where, i)), test))
6392 return 1;
6393 }
6394 return 0;
6395 }
6396 \f
6397 /* Leaf functions and non-leaf functions have different needs. */
6398
6399 static const int
6400 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
6401
6402 static const int
6403 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
6404
6405 static const int *const reg_alloc_orders[] = {
6406 reg_leaf_alloc_order,
6407 reg_nonleaf_alloc_order};
6408
6409 void
6410 order_regs_for_local_alloc (void)
6411 {
6412 static int last_order_nonleaf = 1;
6413
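  /* Hard register 15 is %o7, which every call instruction clobbers
     with the return address, so its liveness is a cheap proxy for
     "this function is not a leaf".  */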
6414 if (regs_ever_live[15] != last_order_nonleaf)
6415 {
6416 last_order_nonleaf = !last_order_nonleaf;
6417 memcpy ((char *) reg_alloc_order,
6418 (const char *) reg_alloc_orders[last_order_nonleaf],
6419 FIRST_PSEUDO_REGISTER * sizeof (int));
6420 }
6421 }
6422 \f
6423 /* Return 1 if REG and MEM are legitimate enough to allow the various
6424 mem<-->reg splits to be run. */
6425
6426 int
6427 sparc_splitdi_legitimate (rtx reg, rtx mem)
6428 {
6429 /* Punt if we are here by mistake. */
6430 gcc_assert (reload_completed);
6431
6432 /* We must have an offsettable memory reference. */
6433 if (! offsettable_memref_p (mem))
6434 return 0;
6435
6436 /* If we have legitimate args for ldd/std, we do not want
6437 the split to happen. */
6438 if ((REGNO (reg) % 2) == 0
6439 && mem_min_alignment (mem, 8))
6440 return 0;
6441
6442 /* Success. */
6443 return 1;
6444 }
6445
6446 /* Return 1 if x and y are some kind of REG and they refer to
6447 different hard registers. This test is guaranteed to be
6448 run after reload. */
6449
6450 int
6451 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
6452 {
6453 if (GET_CODE (x) != REG)
6454 return 0;
6455 if (GET_CODE (y) != REG)
6456 return 0;
6457 if (REGNO (x) == REGNO (y))
6458 return 0;
6459 return 1;
6460 }
6461
6462 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
6463 This makes them candidates for using ldd and std insns.
6464
6465 Note reg1 and reg2 *must* be hard registers. */
6466
6467 int
6468 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
6469 {
6470 /* We might have been passed a SUBREG. */
6471 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
6472 return 0;
6473
6474 if (REGNO (reg1) % 2 != 0)
6475 return 0;
6476
6477 /* Integer ldd is deprecated in SPARC V9.  */
6478 if (TARGET_V9 && REGNO (reg1) < 32)
6479 return 0;
6480
6481 return (REGNO (reg1) == REGNO (reg2) - 1);
6482 }
6483
6484 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
6485 an ldd or std insn.
6486
6487 This can only happen when addr1 and addr2, the addresses in mem1
6488 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
6489 addr1 must also be aligned on a 64-bit boundary.
6490
6491 Also, if dependent_reg_rtx is not null, it should not be used to
6492 compute the address for mem1, i.e. we cannot optimize a sequence
6493 like:
6494 ld [%o0], %o0
6495 ld [%o0 + 4], %o1
6496 to
6497 ldd [%o0], %o0
6498 nor:
6499 ld [%g3 + 4], %g3
6500 ld [%g3], %g2
6501 to
6502 ldd [%g3], %g2
6503
6504 But, note that the transformation from:
6505 ld [%g2 + 4], %g3
6506 ld [%g2], %g2
6507 to
6508 ldd [%g2], %g2
6509 is perfectly fine. Thus, the peephole2 patterns always pass us
6510 the destination register of the first load, never the second one.
6511
6512 For stores we don't have a similar problem, so dependent_reg_rtx is
6513 NULL_RTX. */
6514
6515 int
6516 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
6517 {
6518 rtx addr1, addr2;
6519 unsigned int reg1;
6520 HOST_WIDE_INT offset1;
6521
6522 /* The mems cannot be volatile. */
6523 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
6524 return 0;
6525
6526 /* MEM1 should be aligned on a 64-bit boundary. */
6527 if (MEM_ALIGN (mem1) < 64)
6528 return 0;
6529
6530 addr1 = XEXP (mem1, 0);
6531 addr2 = XEXP (mem2, 0);
6532
6533 /* Extract a register number and offset (if used) from the first addr. */
6534 if (GET_CODE (addr1) == PLUS)
6535 {
6536 /* If not a REG, return zero. */
6537 if (GET_CODE (XEXP (addr1, 0)) != REG)
6538 return 0;
6539 else
6540 {
6541 reg1 = REGNO (XEXP (addr1, 0));
6542 /* The offset must be constant! */
6543 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
6544 return 0;
6545 offset1 = INTVAL (XEXP (addr1, 1));
6546 }
6547 }
6548 else if (GET_CODE (addr1) != REG)
6549 return 0;
6550 else
6551 {
6552 reg1 = REGNO (addr1);
6553 /* This was a simple (mem (reg)) expression. Offset is 0. */
6554 offset1 = 0;
6555 }
6556
6557 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
6558 if (GET_CODE (addr2) != PLUS)
6559 return 0;
6560
6561 if (GET_CODE (XEXP (addr2, 0)) != REG
6562 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
6563 return 0;
6564
6565 if (reg1 != REGNO (XEXP (addr2, 0)))
6566 return 0;
6567
6568 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
6569 return 0;
6570
6571 /* The first offset must be evenly divisible by 8 to ensure the
6572 address is 64 bit aligned. */
6573 if (offset1 % 8 != 0)
6574 return 0;
6575
6576 /* The offset for the second addr must be 4 more than the first addr. */
6577 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
6578 return 0;
6579
6580 /* All the tests passed. addr1 and addr2 are valid for ldd and std
6581 instructions. */
6582 return 1;
6583 }
6584
6585 /* Return 1 if reg is a pseudo, or is the first register in
6586 a hard register pair. This makes it a candidate for use in
6587 ldd and std insns. */
6588
6589 int
6590 register_ok_for_ldd (rtx reg)
6591 {
6592 /* We might have been passed a SUBREG. */
6593 if (GET_CODE (reg) != REG)
6594 return 0;
6595
6596 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
6597 return (REGNO (reg) % 2 == 0);
6598 else
6599 return 1;
6600 }
6601 \f
6602 /* Print operand X (an rtx) in assembler syntax to file FILE.
6603 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
6604 For `%' followed by punctuation, CODE is the punctuation and X is null. */
6605
6606 void
6607 print_operand (FILE *file, rtx x, int code)
6608 {
6609 switch (code)
6610 {
6611 case '#':
6612 /* Output an insn in a delay slot. */
6613 if (final_sequence)
6614 sparc_indent_opcode = 1;
6615 else
6616 fputs ("\n\t nop", file);
6617 return;
6618 case '*':
6619 /* Output an annul flag if there's nothing for the delay slot and we
6620 are optimizing. This is always used with '(' below.
6621 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
6622 this is a dbx bug. So, we only do this when optimizing.
6623 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
6624 Always emit a nop in case the next instruction is a branch. */
6625 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
6626 fputs (",a", file);
6627 return;
6628 case '(':
6629 /* Output a 'nop' if there's nothing for the delay slot and we are
6630 not optimizing. This is always used with '*' above. */
6631 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
6632 fputs ("\n\t nop", file);
6633 else if (final_sequence)
6634 sparc_indent_opcode = 1;
6635 return;
6636 case ')':
6637 /* Output the right displacement from the saved PC on function return.
6638 The caller may have placed an "unimp" insn immediately after the call
6639 so we have to account for it. This insn is used in the 32-bit ABI
6640 when calling a function that returns a non zero-sized structure. The
6641 64-bit ABI doesn't have it. Be careful to have this test be the same
6642 as that used on the call. */
6643 if (! TARGET_ARCH64
6644 && current_function_returns_struct
6645 && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
6646 == INTEGER_CST)
6647 && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
6648 fputs ("12", file);
6649 else
6650 fputc ('8', file);
6651 return;
6652 case '_':
6653 /* Output the Embedded Medium/Anywhere code model base register. */
6654 fputs (EMBMEDANY_BASE_REG, file);
6655 return;
6656 case '&':
6657 /* Print some local dynamic TLS name. */
6658 assemble_name (file, get_some_local_dynamic_name ());
6659 return;
6660
6661 case 'Y':
6662 /* Adjust the operand to take into account a RESTORE operation. */
6663 if (GET_CODE (x) == CONST_INT)
6664 break;
6665 else if (GET_CODE (x) != REG)
6666 output_operand_lossage ("invalid %%Y operand");
6667 else if (REGNO (x) < 8)
6668 fputs (reg_names[REGNO (x)], file);
6669 else if (REGNO (x) >= 24 && REGNO (x) < 32)
6670 fputs (reg_names[REGNO (x)-16], file);
6671 else
6672 output_operand_lossage ("invalid %%Y operand");
6673 return;
6674 case 'L':
6675 /* Print out the low order register name of a register pair. */
6676 if (WORDS_BIG_ENDIAN)
6677 fputs (reg_names[REGNO (x)+1], file);
6678 else
6679 fputs (reg_names[REGNO (x)], file);
6680 return;
6681 case 'H':
6682 /* Print out the high order register name of a register pair. */
6683 if (WORDS_BIG_ENDIAN)
6684 fputs (reg_names[REGNO (x)], file);
6685 else
6686 fputs (reg_names[REGNO (x)+1], file);
6687 return;
6688 case 'R':
6689 /* Print out the second register name of a register pair or quad.
6690 I.e., R (%o0) => %o1. */
6691 fputs (reg_names[REGNO (x)+1], file);
6692 return;
6693 case 'S':
6694 /* Print out the third register name of a register quad.
6695 I.e., S (%o0) => %o2. */
6696 fputs (reg_names[REGNO (x)+2], file);
6697 return;
6698 case 'T':
6699 /* Print out the fourth register name of a register quad.
6700 I.e., T (%o0) => %o3. */
6701 fputs (reg_names[REGNO (x)+3], file);
6702 return;
6703 case 'x':
6704 /* Print a condition code register. */
6705 if (REGNO (x) == SPARC_ICC_REG)
6706 {
6707 /* We don't handle CC[X]_NOOVmode because they're not supposed
6708 to occur here. */
6709 if (GET_MODE (x) == CCmode)
6710 fputs ("%icc", file);
6711 else if (GET_MODE (x) == CCXmode)
6712 fputs ("%xcc", file);
6713 else
6714 gcc_unreachable ();
6715 }
6716 else
6717 /* %fccN register */
6718 fputs (reg_names[REGNO (x)], file);
6719 return;
6720 case 'm':
6721 /* Print the operand's address only. */
6722 output_address (XEXP (x, 0));
6723 return;
6724 case 'r':
6725 /* In this case we need a register. Use %g0 if the
6726 operand is const0_rtx. */
6727 if (x == const0_rtx
6728 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
6729 {
6730 fputs ("%g0", file);
6731 return;
6732 }
6733 else
6734 break;
6735
6736 case 'A':
6737 switch (GET_CODE (x))
6738 {
6739 case IOR: fputs ("or", file); break;
6740 case AND: fputs ("and", file); break;
6741 case XOR: fputs ("xor", file); break;
6742 default: output_operand_lossage ("invalid %%A operand");
6743 }
6744 return;
6745
6746 case 'B':
6747 switch (GET_CODE (x))
6748 {
6749 case IOR: fputs ("orn", file); break;
6750 case AND: fputs ("andn", file); break;
6751 case XOR: fputs ("xnor", file); break;
6752 default: output_operand_lossage ("invalid %%B operand");
6753 }
6754 return;
6755
6756 /* These are used by the conditional move instructions. */
6757 case 'c' :
6758 case 'C':
6759 {
6760 enum rtx_code rc = GET_CODE (x);
6761
6762 if (code == 'c')
6763 {
6764 enum machine_mode mode = GET_MODE (XEXP (x, 0));
6765 if (mode == CCFPmode || mode == CCFPEmode)
6766 rc = reverse_condition_maybe_unordered (GET_CODE (x));
6767 else
6768 rc = reverse_condition (GET_CODE (x));
6769 }
6770 switch (rc)
6771 {
6772 case NE: fputs ("ne", file); break;
6773 case EQ: fputs ("e", file); break;
6774 case GE: fputs ("ge", file); break;
6775 case GT: fputs ("g", file); break;
6776 case LE: fputs ("le", file); break;
6777 case LT: fputs ("l", file); break;
6778 case GEU: fputs ("geu", file); break;
6779 case GTU: fputs ("gu", file); break;
6780 case LEU: fputs ("leu", file); break;
6781 case LTU: fputs ("lu", file); break;
6782 case LTGT: fputs ("lg", file); break;
6783 case UNORDERED: fputs ("u", file); break;
6784 case ORDERED: fputs ("o", file); break;
6785 case UNLT: fputs ("ul", file); break;
6786 case UNLE: fputs ("ule", file); break;
6787 case UNGT: fputs ("ug", file); break;
6788 case UNGE: fputs ("uge", file); break;
6789 case UNEQ: fputs ("ue", file); break;
6790 default: output_operand_lossage (code == 'c'
6791 ? "invalid %%c operand"
6792 : "invalid %%C operand");
6793 }
6794 return;
6795 }
6796
6797 /* These are used by the movr instruction pattern. */
6798 case 'd':
6799 case 'D':
6800 {
6801 enum rtx_code rc = (code == 'd'
6802 ? reverse_condition (GET_CODE (x))
6803 : GET_CODE (x));
6804 switch (rc)
6805 {
6806 case NE: fputs ("ne", file); break;
6807 case EQ: fputs ("e", file); break;
6808 case GE: fputs ("gez", file); break;
6809 case LT: fputs ("lz", file); break;
6810 case LE: fputs ("lez", file); break;
6811 case GT: fputs ("gz", file); break;
6812 default: output_operand_lossage (code == 'd'
6813 ? "invalid %%d operand"
6814 : "invalid %%D operand");
6815 }
6816 return;
6817 }
6818
6819 case 'b':
6820 {
6821 /* Print a sign-extended character. */
6822 int i = trunc_int_for_mode (INTVAL (x), QImode);
6823 fprintf (file, "%d", i);
6824 return;
6825 }
6826
6827 case 'f':
6828 /* Operand must be a MEM; write its address. */
6829 if (GET_CODE (x) != MEM)
6830 output_operand_lossage ("invalid %%f operand");
6831 output_address (XEXP (x, 0));
6832 return;
6833
6834 case 's':
6835 {
6836 /* Print a sign-extended 32-bit value. */
6837 HOST_WIDE_INT i;
6838 if (GET_CODE (x) == CONST_INT)
6839 i = INTVAL (x);
6840 else if (GET_CODE (x) == CONST_DOUBLE)
6841 i = CONST_DOUBLE_LOW (x);
6842 else
6843 {
6844 output_operand_lossage ("invalid %%s operand");
6845 return;
6846 }
6847 i = trunc_int_for_mode (i, SImode);
6848 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
6849 return;
6850 }
6851
6852 case 0:
6853 /* Do nothing special. */
6854 break;
6855
6856 default:
6857 /* Undocumented flag. */
6858 output_operand_lossage ("invalid operand output code");
6859 }
6860
6861 if (GET_CODE (x) == REG)
6862 fputs (reg_names[REGNO (x)], file);
6863 else if (GET_CODE (x) == MEM)
6864 {
6865 fputc ('[', file);
6866 /* Poor Sun assembler doesn't understand absolute addressing. */
6867 if (CONSTANT_P (XEXP (x, 0)))
6868 fputs ("%g0+", file);
6869 output_address (XEXP (x, 0));
6870 fputc (']', file);
6871 }
6872 else if (GET_CODE (x) == HIGH)
6873 {
6874 fputs ("%hi(", file);
6875 output_addr_const (file, XEXP (x, 0));
6876 fputc (')', file);
6877 }
6878 else if (GET_CODE (x) == LO_SUM)
6879 {
6880 print_operand (file, XEXP (x, 0), 0);
6881 if (TARGET_CM_MEDMID)
6882 fputs ("+%l44(", file);
6883 else
6884 fputs ("+%lo(", file);
6885 output_addr_const (file, XEXP (x, 1));
6886 fputc (')', file);
6887 }
6888 else if (GET_CODE (x) == CONST_DOUBLE
6889 && (GET_MODE (x) == VOIDmode
6890 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
6891 {
6892 if (CONST_DOUBLE_HIGH (x) == 0)
6893 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
6894 else if (CONST_DOUBLE_HIGH (x) == -1
6895 && CONST_DOUBLE_LOW (x) < 0)
6896 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
6897 else
6898 output_operand_lossage ("long long constant not a valid immediate operand");
6899 }
6900 else if (GET_CODE (x) == CONST_DOUBLE)
6901 output_operand_lossage ("floating point constant not a valid immediate operand");
6902 else { output_addr_const (file, x); }
6903 }
6904 \f
6905 /* Target hook for assembling integer objects. The sparc version has
6906 special handling for aligned DI-mode objects. */
6907
6908 static bool
6909 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
6910 {
6911 /* ??? We only output .xword's for symbols and only then in environments
6912 where the assembler can handle them. */
6913 if (aligned_p && size == 8
6914 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
6915 {
6916 if (TARGET_V9)
6917 {
6918 assemble_integer_with_op ("\t.xword\t", x);
6919 return true;
6920 }
6921 else
6922 {
6923 assemble_aligned_integer (4, const0_rtx);
6924 assemble_aligned_integer (4, x);
6925 return true;
6926 }
6927 }
6928 return default_assemble_integer (x, size, aligned_p);
6929 }
6930 \f
6931 /* Return the value of a code used in the .proc pseudo-op that says
6932 what kind of result this function returns. For non-C types, we pick
6933 the closest C type. */
6934
6935 #ifndef SHORT_TYPE_SIZE
6936 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
6937 #endif
6938
6939 #ifndef INT_TYPE_SIZE
6940 #define INT_TYPE_SIZE BITS_PER_WORD
6941 #endif
6942
6943 #ifndef LONG_TYPE_SIZE
6944 #define LONG_TYPE_SIZE BITS_PER_WORD
6945 #endif
6946
6947 #ifndef LONG_LONG_TYPE_SIZE
6948 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
6949 #endif
6950
6951 #ifndef FLOAT_TYPE_SIZE
6952 #define FLOAT_TYPE_SIZE BITS_PER_WORD
6953 #endif
6954
6955 #ifndef DOUBLE_TYPE_SIZE
6956 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6957 #endif
6958
6959 #ifndef LONG_DOUBLE_TYPE_SIZE
6960 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
6961 #endif
6962
6963 unsigned long
6964 sparc_type_code (register tree type)
6965 {
6966 register unsigned long qualifiers = 0;
6967 register unsigned shift;
6968
6969 /* Only the first 30 bits of the qualifier are valid. We must refrain from
6970 setting more, since some assemblers will give an error for this. Also,
6971 we must be careful to avoid shifts of 32 bits or more to avoid getting
6972 unpredictable results. */
6973
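  /* For example, for the C type "char **" the two POINTER_TYPE levels
     contribute (1 << 6) and (1 << 8) and the signed char base type
     contributes 2, yielding the code 0x142.  */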
6974 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
6975 {
6976 switch (TREE_CODE (type))
6977 {
6978 case ERROR_MARK:
6979 return qualifiers;
6980
6981 case ARRAY_TYPE:
6982 qualifiers |= (3 << shift);
6983 break;
6984
6985 case FUNCTION_TYPE:
6986 case METHOD_TYPE:
6987 qualifiers |= (2 << shift);
6988 break;
6989
6990 case POINTER_TYPE:
6991 case REFERENCE_TYPE:
6992 case OFFSET_TYPE:
6993 qualifiers |= (1 << shift);
6994 break;
6995
6996 case RECORD_TYPE:
6997 return (qualifiers | 8);
6998
6999 case UNION_TYPE:
7000 case QUAL_UNION_TYPE:
7001 return (qualifiers | 9);
7002
7003 case ENUMERAL_TYPE:
7004 return (qualifiers | 10);
7005
7006 case VOID_TYPE:
7007 return (qualifiers | 16);
7008
7009 case INTEGER_TYPE:
7010 /* If this is a range type, consider it to be the underlying
7011 type. */
7012 if (TREE_TYPE (type) != 0)
7013 break;
7014
7015 /* Carefully distinguish all the standard types of C,
7016 without messing up if the language is not C. We do this by
7017 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7018 look at both the names and the above fields, but that's redundant.
7019 Any type whose size is between two C types will be considered
7020 to be the wider of the two types. Also, we do not have a
7021 special code to use for "long long", so anything wider than
7022 long is treated the same. Note that we can't distinguish
7023 between "int" and "long" in this code if they are the same
7024 size, but that's fine, since neither can the assembler. */
7025
7026 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7027 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7028
7029 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7030 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7031
7032 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7033 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7034
7035 else
7036 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7037
7038 case REAL_TYPE:
7039 /* If this is a range type, consider it to be the underlying
7040 type. */
7041 if (TREE_TYPE (type) != 0)
7042 break;
7043
7044 /* Carefully distinguish all the standard types of C,
7045 without messing up if the language is not C. */
7046
7047 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7048 return (qualifiers | 6);
7049
7050 else
7051 return (qualifiers | 7);
7052
7053 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7054 /* ??? We need to distinguish between double and float complex types,
7055 but I don't know how yet because I can't reach this code from
7056 existing front-ends. */
7057 return (qualifiers | 7); /* Who knows? */
7058
7059 case VECTOR_TYPE:
7060 case BOOLEAN_TYPE: /* Boolean truth value type. */
7061 case LANG_TYPE: /* ? */
7062 return qualifiers;
7063
7064 default:
7065 gcc_unreachable (); /* Not a type! */
7066 }
7067 }
7068
7069 return qualifiers;
7070 }
7071 \f
7072 /* Nested function support. */
7073
7074 /* Emit RTL insns to initialize the variable parts of a trampoline.
7075 FNADDR is an RTX for the address of the function's pure code.
7076 CXT is an RTX for the static chain value for the function.
7077
7078 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7079 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7080 (to store insns). This is a bit excessive. Perhaps a different
7081 mechanism would be better here.
7082
7083 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7084
7085 void
7086 sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7087 {
7088 /* SPARC 32-bit trampoline:
7089
7090 sethi %hi(fn), %g1
7091 sethi %hi(static), %g2
7092 jmp %g1+%lo(fn)
7093 or %g2, %lo(static), %g2
7094
7095 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7096 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7097 */
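  /* Each store below merges a piece of the target address into one of
     those templates: the sethi words take the address >> 10 in their
     low 22 bits, the jmpl/or words the address & 0x3ff in their low 10
     bits.  E.g. for fnaddr == 0x12345678 the first word becomes
     0x03000000 | 0x48d15 == 0x03048d15, i.e. "sethi %hi(0x12345678),
     %g1".  */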
7098
7099 emit_move_insn
7100 (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
7101 expand_binop (SImode, ior_optab,
7102 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7103 size_int (10), 0, 1),
7104 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7105 NULL_RTX, 1, OPTAB_DIRECT));
7106
7107 emit_move_insn
7108 (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7109 expand_binop (SImode, ior_optab,
7110 expand_shift (RSHIFT_EXPR, SImode, cxt,
7111 size_int (10), 0, 1),
7112 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7113 NULL_RTX, 1, OPTAB_DIRECT));
7114
7115 emit_move_insn
7116 (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7117 expand_binop (SImode, ior_optab,
7118 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7119 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7120 NULL_RTX, 1, OPTAB_DIRECT));
7121
7122 emit_move_insn
7123 (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7124 expand_binop (SImode, ior_optab,
7125 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7126 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7127 NULL_RTX, 1, OPTAB_DIRECT));
7128
7129 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7130 aligned on a 16 byte boundary so one flush clears it all. */
7131 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
7132 if (sparc_cpu != PROCESSOR_ULTRASPARC
7133 && sparc_cpu != PROCESSOR_ULTRASPARC3
7134 && sparc_cpu != PROCESSOR_NIAGARA)
7135 emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
7136 plus_constant (tramp, 8)))));
7137
7138 /* Call __enable_execute_stack after writing onto the stack to make sure
7139 the stack address is accessible. */
7140 #ifdef ENABLE_EXECUTE_STACK
7141 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7142 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7143 #endif
7144
7145 }
7146
7147 /* The 64-bit version is simpler because it makes more sense to load the
7148 values as "immediate" data out of the trampoline. It's also easier since
7149 we can read the PC without clobbering a register. */
7150
7151 void
7152 sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
7153 {
7154 /* SPARC 64-bit trampoline:
7155
7156 rd %pc, %g1
7157 ldx [%g1+24], %g5
7158 jmp %g5
7159 ldx [%g1+16], %g5
7160 +16 bytes data
7161 */
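  /* The four SImode constants below are exactly those instructions:
     0x83414000 is "rd %pc, %g1", 0xca586018 "ldx [%g1+24], %g5",
     0x81c14000 "jmp %g5" and 0xca586010 "ldx [%g1+16], %g5"; only the
     two DImode data words vary from trampoline to trampoline.  */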
7162
7163 emit_move_insn (gen_rtx_MEM (SImode, tramp),
7164 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7165 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
7166 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7167 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
7168 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7169 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
7170 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7171 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
7172 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
7173 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));
7174
7175 if (sparc_cpu != PROCESSOR_ULTRASPARC
7176 && sparc_cpu != PROCESSOR_ULTRASPARC3
7177 && sparc_cpu != PROCESSOR_NIAGARA)
7178 emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
7179
7180 /* Call __enable_execute_stack after writing onto the stack to make sure
7181 the stack address is accessible. */
7182 #ifdef ENABLE_EXECUTE_STACK
7183 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7184 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
7185 #endif
7186 }
7187 \f
7188 /* Adjust the cost of a scheduling dependency. Return the new cost of
7189 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7190
7191 static int
7192 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7193 {
7194 enum attr_type insn_type;
7195
7196 if (! recog_memoized (insn))
7197 return 0;
7198
7199 insn_type = get_attr_type (insn);
7200
7201 if (REG_NOTE_KIND (link) == 0)
7202 {
7203 /* Data dependency; DEP_INSN writes a register that INSN reads some
7204 cycles later. */
7205
7206 /* If a load, then the dependence must be on the memory address;
7207 add an extra "cycle". Note that the cost could be two cycles
7208 if the reg was written late in an instruction group; we cannot tell
7209 here. */
7210 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7211 return cost + 3;
7212
7213 /* Get the delay only if the address of the store is the dependence. */
7214 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7215 {
7216 rtx pat = PATTERN (insn);
7217 rtx dep_pat = PATTERN (dep_insn);
7218
7219 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7220 return cost; /* This should not happen! */
7221
7222 /* The dependency between the two instructions was on the data that
7223 is being stored. Assume that this implies that the address of the
7224 store is not dependent. */
7225 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7226 return cost;
7227
7228 return cost + 3; /* An approximation. */
7229 }
7230
7231 /* A shift instruction cannot receive its data from an instruction
7232 in the same cycle; add a one cycle penalty. */
7233 if (insn_type == TYPE_SHIFT)
7234 return cost + 3; /* Split before cascade into shift. */
7235 }
7236 else
7237 {
7238 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7239 INSN writes some cycles later. */
7240
7241 /* These are only significant for the fpu unit; writing a fp reg before
7242 the fpu has finished with it stalls the processor. */
7243
7244 /* Reusing an integer register causes no problems. */
7245 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7246 return 0;
7247 }
7248
7249 return cost;
7250 }
7251
7252 static int
7253 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7254 {
7255 enum attr_type insn_type, dep_type;
7256 rtx pat = PATTERN (insn);
7257 rtx dep_pat = PATTERN (dep_insn);
7258
7259 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
7260 return cost;
7261
7262 insn_type = get_attr_type (insn);
7263 dep_type = get_attr_type (dep_insn);
7264
7265 switch (REG_NOTE_KIND (link))
7266 {
7267 case 0:
7268 /* Data dependency; DEP_INSN writes a register that INSN reads some
7269 cycles later. */
7270
7271 switch (insn_type)
7272 {
7273 case TYPE_STORE:
7274 case TYPE_FPSTORE:
7275 /* Get the delay iff the address of the store is the dependence. */
7276 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7277 return cost;
7278
7279 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7280 return cost;
7281 return cost + 3;
7282
7283 case TYPE_LOAD:
7284 case TYPE_SLOAD:
7285 case TYPE_FPLOAD:
7286 /* If a load, then the dependence must be on the memory address. If
7287 the addresses aren't equal, then it might be a false dependency.  */
7288 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
7289 {
7290 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
7291 || GET_CODE (SET_DEST (dep_pat)) != MEM
7292 || GET_CODE (SET_SRC (pat)) != MEM
7293 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
7294 XEXP (SET_SRC (pat), 0)))
7295 return cost + 2;
7296
7297 return cost + 8;
7298 }
7299 break;
7300
7301 case TYPE_BRANCH:
7302 /* Compare to branch latency is 0. There is no benefit from
7303 separating compare and branch. */
7304 if (dep_type == TYPE_COMPARE)
7305 return 0;
7306 /* Floating point compare to branch latency is less than
7307 compare to conditional move. */
7308 if (dep_type == TYPE_FPCMP)
7309 return cost - 1;
7310 break;
7311 default:
7312 break;
7313 }
7314 break;
7315
7316 case REG_DEP_ANTI:
7317 /* Anti-dependencies only penalize the fpu unit. */
7318 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7319 return 0;
7320 break;
7321
7322 default:
7323 break;
7324 }
7325
7326 return cost;
7327 }
7328
7329 static int
7330 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7331 {
7332 switch (sparc_cpu)
7333 {
7334 case PROCESSOR_SUPERSPARC:
7335 cost = supersparc_adjust_cost (insn, link, dep, cost);
7336 break;
7337 case PROCESSOR_HYPERSPARC:
7338 case PROCESSOR_SPARCLITE86X:
7339 cost = hypersparc_adjust_cost (insn, link, dep, cost);
7340 break;
7341 default:
7342 break;
7343 }
7344 return cost;
7345 }
7346
7347 static void
7348 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7349 int sched_verbose ATTRIBUTE_UNUSED,
7350 int max_ready ATTRIBUTE_UNUSED)
7351 {
7352 }
7353
7354 static int
7355 sparc_use_sched_lookahead (void)
7356 {
7357 if (sparc_cpu == PROCESSOR_NIAGARA)
7358 return 0;
7359 if (sparc_cpu == PROCESSOR_ULTRASPARC
7360 || sparc_cpu == PROCESSOR_ULTRASPARC3)
7361 return 4;
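  /* Set-membership test: the mask has one bit per processor enum
     value, so this is equivalent to three equality comparisons.  */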
7362 if ((1 << sparc_cpu) &
7363 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
7364 (1 << PROCESSOR_SPARCLITE86X)))
7365 return 3;
7366 return 0;
7367 }
7368
7369 static int
7370 sparc_issue_rate (void)
7371 {
7372 switch (sparc_cpu)
7373 {
7374 case PROCESSOR_NIAGARA:
7375 default:
7376 return 1;
7377 case PROCESSOR_V9:
7378 /* Assume V9 processors are capable of at least dual-issue. */
7379 return 2;
7380 case PROCESSOR_SUPERSPARC:
7381 return 3;
7382 case PROCESSOR_HYPERSPARC:
7383 case PROCESSOR_SPARCLITE86X:
7384 return 2;
7385 case PROCESSOR_ULTRASPARC:
7386 case PROCESSOR_ULTRASPARC3:
7387 return 4;
7388 }
7389 }
7390
7391 static int
7392 set_extends (rtx insn)
7393 {
7394 register rtx pat = PATTERN (insn);
7395
7396 switch (GET_CODE (SET_SRC (pat)))
7397 {
7398 /* Load and some shift instructions zero extend. */
7399 case MEM:
7400 case ZERO_EXTEND:
7401 /* sethi clears the high bits.  */
7402 case HIGH:
7403 /* LO_SUM is used with sethi; sethi cleared the high
7404 bits and the values used with lo_sum are positive.  */
7405 case LO_SUM:
7406 /* Store flag stores 0 or 1.  */
7407 case LT: case LTU:
7408 case GT: case GTU:
7409 case LE: case LEU:
7410 case GE: case GEU:
7411 case EQ:
7412 case NE:
7413 return 1;
7414 case AND:
7415 {
7416 rtx op0 = XEXP (SET_SRC (pat), 0);
7417 rtx op1 = XEXP (SET_SRC (pat), 1);
7418 if (GET_CODE (op1) == CONST_INT)
7419 return INTVAL (op1) >= 0;
7420 if (GET_CODE (op0) != REG)
7421 return 0;
7422 if (sparc_check_64 (op0, insn) == 1)
7423 return 1;
7424 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7425 }
7426 case IOR:
7427 case XOR:
7428 {
7429 rtx op0 = XEXP (SET_SRC (pat), 0);
7430 rtx op1 = XEXP (SET_SRC (pat), 1);
7431 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
7432 return 0;
7433 if (GET_CODE (op1) == CONST_INT)
7434 return INTVAL (op1) >= 0;
7435 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
7436 }
7437 case LSHIFTRT:
7438 return GET_MODE (SET_SRC (pat)) == SImode;
7439 /* Positive integers leave the high bits zero. */
7440 case CONST_DOUBLE:
7441 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
7442 case CONST_INT:
7443 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
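    /* An arithmetic right shift or sign extension of an SImode value
       yields a sign-extended result; flag that by returning -1 instead
       of the usual 0 or 1.  */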
7444 case ASHIFTRT:
7445 case SIGN_EXTEND:
7446 return - (GET_MODE (SET_SRC (pat)) == SImode);
7447 case REG:
7448 return sparc_check_64 (SET_SRC (pat), insn);
7449 default:
7450 return 0;
7451 }
7452 }
7453
7454 /* We _ought_ to have only one kind per function, but... */
7455 static GTY(()) rtx sparc_addr_diff_list;
7456 static GTY(()) rtx sparc_addr_list;
7457
7458 void
7459 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
7460 {
7461 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
7462 if (diff)
7463 sparc_addr_diff_list
7464 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
7465 else
7466 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
7467 }
7468
7469 static void
7470 sparc_output_addr_vec (rtx vec)
7471 {
7472 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7473 int idx, vlen = XVECLEN (body, 0);
7474
7475 #ifdef ASM_OUTPUT_ADDR_VEC_START
7476 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7477 #endif
7478
7479 #ifdef ASM_OUTPUT_CASE_LABEL
7480 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7481 NEXT_INSN (lab));
7482 #else
7483 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7484 #endif
7485
7486 for (idx = 0; idx < vlen; idx++)
7487 {
7488 ASM_OUTPUT_ADDR_VEC_ELT
7489 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
7490 }
7491
7492 #ifdef ASM_OUTPUT_ADDR_VEC_END
7493 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7494 #endif
7495 }
7496
7497 static void
7498 sparc_output_addr_diff_vec (rtx vec)
7499 {
7500 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
7501 rtx base = XEXP (XEXP (body, 0), 0);
7502 int idx, vlen = XVECLEN (body, 1);
7503
7504 #ifdef ASM_OUTPUT_ADDR_VEC_START
7505 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
7506 #endif
7507
7508 #ifdef ASM_OUTPUT_CASE_LABEL
7509 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
7510 NEXT_INSN (lab));
7511 #else
7512 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
7513 #endif
7514
7515 for (idx = 0; idx < vlen; idx++)
7516 {
7517 ASM_OUTPUT_ADDR_DIFF_ELT
7518 (asm_out_file,
7519 body,
7520 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
7521 CODE_LABEL_NUMBER (base));
7522 }
7523
7524 #ifdef ASM_OUTPUT_ADDR_VEC_END
7525 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
7526 #endif
7527 }
7528
7529 static void
7530 sparc_output_deferred_case_vectors (void)
7531 {
7532 rtx t;
7533 int align;
7534
7535 if (sparc_addr_list == NULL_RTX
7536 && sparc_addr_diff_list == NULL_RTX)
7537 return;
7538
7539 /* Align to cache line in the function's code section. */
7540 switch_to_section (current_function_section ());
7541
7542 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
7543 if (align > 0)
7544 ASM_OUTPUT_ALIGN (asm_out_file, align);
7545
7546 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
7547 sparc_output_addr_vec (XEXP (t, 0));
7548 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
7549 sparc_output_addr_diff_vec (XEXP (t, 0));
7550
7551 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
7552 }
7553
7554 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
7555 unknown.  Return 1 if the high bits are known to be zero, -1 if the
7556 register is known to be sign-extended.  */
7557 int
7558 sparc_check_64 (rtx x, rtx insn)
7559 {
7560 /* If a register is set only once, it is safe to ignore insns this
7561 code does not know how to handle.  The loop will either recognize
7562 the single set and return the correct value or fail to recognize
7563 it and return 0.  */
7564 int set_once = 0;
7565 rtx y = x;
7566
7567 gcc_assert (GET_CODE (x) == REG);
7568
7569 if (GET_MODE (x) == DImode)
7570 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
7571
7572 if (flag_expensive_optimizations
7573 && REG_N_SETS (REGNO (y)) == 1)
7574 set_once = 1;
7575
7576 if (insn == 0)
7577 {
7578 if (set_once)
7579 insn = get_last_insn_anywhere ();
7580 else
7581 return 0;
7582 }
7583
7584 while ((insn = PREV_INSN (insn)))
7585 {
7586 switch (GET_CODE (insn))
7587 {
7588 case JUMP_INSN:
7589 case NOTE:
7590 break;
7591 case CODE_LABEL:
7592 case CALL_INSN:
7593 default:
7594 if (! set_once)
7595 return 0;
7596 break;
7597 case INSN:
7598 {
7599 rtx pat = PATTERN (insn);
7600 if (GET_CODE (pat) != SET)
7601 return 0;
7602 if (rtx_equal_p (x, SET_DEST (pat)))
7603 return set_extends (insn);
7604 if (y && rtx_equal_p (y, SET_DEST (pat)))
7605 return set_extends (insn);
7606 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
7607 return 0;
7608 }
7609 }
7610 }
7611 return 0;
7612 }
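/* For instance, if the only def of X that reaches INSN is an srl,
   i.e. (set (reg) (lshiftrt:SI ...)), set_extends returns 1 and the
   high bits are known to be zero; after a sign_extend or ashiftrt of
   an SImode value it returns -1 instead.  (An illustrative note; see
   the set_extends cases above.)  */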
7613
7614 /* Return the assembly code to perform a DImode shift using
7615 a 64-bit global or out register on SPARC-V8+.  */
7616 const char *
7617 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
7618 {
7619 static char asm_code[60];
7620
7621 /* The scratch register is only required when the destination
7622 register is not a 64-bit global or out register. */
7623 if (which_alternative != 2)
7624 operands[3] = operands[0];
7625
7626 /* We can only shift by constants <= 63. */
7627 if (GET_CODE (operands[2]) == CONST_INT)
7628 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7629
7630 if (GET_CODE (operands[1]) == CONST_INT)
7631 {
7632 output_asm_insn ("mov\t%1, %3", operands);
7633 }
7634 else
7635 {
7636 output_asm_insn ("sllx\t%H1, 32, %3", operands);
7637 if (sparc_check_64 (operands[1], insn) <= 0)
7638 output_asm_insn ("srl\t%L1, 0, %L1", operands);
7639 output_asm_insn ("or\t%L1, %3, %3", operands);
7640 }
7641
7642 strcpy (asm_code, opcode);
7643
7644 if (which_alternative != 2)
7645 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7646 else
7647 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7648 }
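/* For example, with OPCODE "sllx" and alternative 2 (the destination is
   not a 64-bit global or out register, so %3 is a real 64-bit scratch),
   the sequence built above comes out as:

	sllx	%H1, 32, %3	! scratch = high word << 32
	srl	%L1, 0, %L1	! zero-extend the low word if need be
	or	%L1, %3, %3	! scratch = full 64-bit source
	sllx	%3, %2, %3	! the shift proper
	srlx	%3, 32, %H0	! high word of the result
	mov	%3, %L0		! low word of the result

   (Illustrative only; the srl is omitted when sparc_check_64 proves
   the high bits of %L1 are already clear.)  */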
7649 \f
7650 /* Output rtl to call the profiling function MCOUNT_FUNCTION with the
7651 address of profiler label LABELNO, for profiling a function entry.  */
7652
7653 void
7654 sparc_profile_hook (int labelno)
7655 {
7656 char buf[32];
7657 rtx lab, fun;
7658
7659 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7660 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7661 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7662
7663 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7664 }
7665 \f
7666 #ifdef OBJECT_FORMAT_ELF
7667 static void
7668 sparc_elf_asm_named_section (const char *name, unsigned int flags,
7669 tree decl)
7670 {
7671 if (flags & SECTION_MERGE)
7672 {
7673 /* entsize cannot be expressed in this section attribute
7674 encoding style.  */
7675 default_elf_asm_named_section (name, flags, decl);
7676 return;
7677 }
7678
7679 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7680
7681 if (!(flags & SECTION_DEBUG))
7682 fputs (",#alloc", asm_out_file);
7683 if (flags & SECTION_WRITE)
7684 fputs (",#write", asm_out_file);
7685 if (flags & SECTION_TLS)
7686 fputs (",#tls", asm_out_file);
7687 if (flags & SECTION_CODE)
7688 fputs (",#execinstr", asm_out_file);
7689
7690 /* ??? Handle SECTION_BSS. */
7691
7692 fputc ('\n', asm_out_file);
7693 }
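/* For example, a plain writable data section is emitted as

	.section	".mydata",#alloc,#write

   using the Solaris-style #attribute flags rather than the usual ELF
   "aw" flag string.  (An illustrative example.)  */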
7694 #endif /* OBJECT_FORMAT_ELF */
7695
7696 /* We do not allow indirect calls to be optimized into sibling calls.
7697
7698 We cannot use sibling calls when delayed branches are disabled
7699 because they will likely require the call delay slot to be filled.
7700
7701 Also, on SPARC 32-bit we cannot emit a sibling call when the
7702 current function returns a structure. This is because the "unimp
7703 after call" convention would cause the callee to return to the
7704 wrong place. The generic code already disallows cases where the
7705 function being called returns a structure.
7706
7707 It may seem strange that this last case could occur.  Usually there
7708 is code after the call which jumps to epilogue code which dumps the
7709 return value into the struct return area.  That ought to invalidate
7710 the sibling call, right?  Well, in the C++ case we can end up passing
7711 the pointer to the struct return area to a constructor (which returns
7712 void) and then nothing else happens.  Such a sibling call would look
7713 valid without the added check here.  */
7714 static bool
7715 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7716 {
7717 return (decl
7718 && flag_delayed_branch
7719 && (TARGET_ARCH64 || ! current_function_returns_struct));
7720 }
7721 \f
7722 /* libfunc renaming. */
7723 #include "config/gofast.h"
7724
7725 static void
7726 sparc_init_libfuncs (void)
7727 {
7728 if (TARGET_ARCH32)
7729 {
7730 /* Use the subroutines that Sun's library provides for integer
7731 multiply and divide. The `*' prevents an underscore from
7732 being prepended by the compiler. .umul is a little faster
7733 than .mul. */
7734 set_optab_libfunc (smul_optab, SImode, "*.umul");
7735 set_optab_libfunc (sdiv_optab, SImode, "*.div");
7736 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
7737 set_optab_libfunc (smod_optab, SImode, "*.rem");
7738 set_optab_libfunc (umod_optab, SImode, "*.urem");
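/* E.g. where a 32-bit signed divide needs a libcall at all, it now
   becomes "call .div" rather than a call to libgcc's __divsi3.
   (An illustrative note.)  */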
7739
7740 /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
7741 set_optab_libfunc (add_optab, TFmode, "_Q_add");
7742 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
7743 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
7744 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
7745 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
7746
7747 /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
7748 is because with soft-float, the SFmode and DFmode sqrt
7749 instructions are absent, so the compiler would notice that and
7750 try to use the TFmode sqrt instruction for calls to the
7751 built-in function sqrt, which fails.  */
7752 if (TARGET_FPU)
7753 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
7754
7755 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
7756 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
7757 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
7758 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
7759 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
7760 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
7761
7762 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
7763 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
7764 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
7765 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
7766
7767 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
7768 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
7769 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
7770 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
7771
7772 if (DITF_CONVERSION_LIBFUNCS)
7773 {
7774 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
7775 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
7776 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
7777 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
7778 }
7779
7780 if (SUN_CONVERSION_LIBFUNCS)
7781 {
7782 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
7783 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
7784 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
7785 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
7786 }
7787 }
7788 if (TARGET_ARCH64)
7789 {
7790 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
7791 do not exist in the library. Make sure the compiler does not
7792 emit calls to them by accident. (It should always use the
7793 hardware instructions.) */
7794 set_optab_libfunc (smul_optab, SImode, 0);
7795 set_optab_libfunc (sdiv_optab, SImode, 0);
7796 set_optab_libfunc (udiv_optab, SImode, 0);
7797 set_optab_libfunc (smod_optab, SImode, 0);
7798 set_optab_libfunc (umod_optab, SImode, 0);
7799
7800 if (SUN_INTEGER_MULTIPLY_64)
7801 {
7802 set_optab_libfunc (smul_optab, DImode, "__mul64");
7803 set_optab_libfunc (sdiv_optab, DImode, "__div64");
7804 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
7805 set_optab_libfunc (smod_optab, DImode, "__rem64");
7806 set_optab_libfunc (umod_optab, DImode, "__urem64");
7807 }
7808
7809 if (SUN_CONVERSION_LIBFUNCS)
7810 {
7811 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
7812 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
7813 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
7814 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
7815 }
7816 }
7817
7818 gofast_maybe_init_libfuncs ();
7819 }
7820 \f
7821 #define def_builtin(NAME, CODE, TYPE) \
7822 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
7823 NULL_TREE)
7824
7825 /* Implement the TARGET_INIT_BUILTINS target hook.
7826 Create builtin functions for special SPARC instructions. */
7827
7828 static void
7829 sparc_init_builtins (void)
7830 {
7831 if (TARGET_VIS)
7832 sparc_vis_init_builtins ();
7833 }
7834
7835 /* Create builtin functions for VIS 1.0 instructions. */
7836
7837 static void
7838 sparc_vis_init_builtins (void)
7839 {
7840 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
7841 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
7842 tree v4hi = build_vector_type (intHI_type_node, 4);
7843 tree v2hi = build_vector_type (intHI_type_node, 2);
7844 tree v2si = build_vector_type (intSI_type_node, 2);
7845
7846 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
7847 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
7848 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
7849 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
7850 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
7851 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
7852 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
7853 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
7854 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
7855 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
7856 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
7857 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
7858 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
7859 v8qi, v8qi,
7860 intDI_type_node, 0);
7861 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
7862 intDI_type_node,
7863 intDI_type_node, 0);
7864 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
7865 ptr_type_node,
7866 intSI_type_node, 0);
7867 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
7868 ptr_type_node,
7869 intDI_type_node, 0);
7870
7871 /* Packing and expanding vectors. */
7872 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
7873 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
7874 v8qi_ftype_v2si_v8qi);
7875 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
7876 v2hi_ftype_v2si);
7877 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
7878 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
7879 v8qi_ftype_v4qi_v4qi);
7880
7881 /* Multiplications. */
7882 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
7883 v4hi_ftype_v4qi_v4hi);
7884 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
7885 v4hi_ftype_v4qi_v2hi);
7886 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
7887 v4hi_ftype_v4qi_v2hi);
7888 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
7889 v4hi_ftype_v8qi_v4hi);
7890 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
7891 v4hi_ftype_v8qi_v4hi);
7892 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
7893 v2si_ftype_v4qi_v2hi);
7894 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
7895 v2si_ftype_v4qi_v2hi);
7896
7897 /* Data aligning. */
7898 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
7899 v4hi_ftype_v4hi_v4hi);
7900 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
7901 v8qi_ftype_v8qi_v8qi);
7902 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
7903 v2si_ftype_v2si_v2si);
7904 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
7905 di_ftype_di_di);
7906 if (TARGET_ARCH64)
7907 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
7908 ptr_ftype_ptr_di);
7909 else
7910 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
7911 ptr_ftype_ptr_si);
7912
7913 /* Pixel distance. */
7914 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
7915 di_ftype_v8qi_v8qi_di);
7916 }
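/* A minimal, hypothetical use of one of these builtins from user code
   compiled with -mvis (the typedef names are just for illustration):

	typedef short v4hi __attribute__ ((vector_size (8)));
	typedef unsigned char v4qi __attribute__ ((vector_size (4)));

	v4qi pack (v4hi x)
	{
	  return __builtin_vis_fpack16 (x);
	}

   This matches the v4qi_ftype_v4hi signature registered above.  */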
7917
7918 /* Handle the TARGET_EXPAND_BUILTIN target hook.
7919 Expand builtin functions for SPARC intrinsics.  */
7920
7921 static rtx
7922 sparc_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7923 enum machine_mode tmode, int ignore ATTRIBUTE_UNUSED)
7924 {
7925 tree arglist;
7926 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7927 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
7928 rtx pat, op[4];
7929 enum machine_mode mode[4];
7930 int arg_count = 0;
7931
7932 mode[arg_count] = tmode;
7933
7934 if (target == 0
7935 || GET_MODE (target) != tmode
7936 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
7937 op[arg_count] = gen_reg_rtx (tmode);
7938 else
7939 op[arg_count] = target;
7940
7941 for (arglist = TREE_OPERAND (exp, 1); arglist;
7942 arglist = TREE_CHAIN (arglist))
7943 {
7944 tree arg = TREE_VALUE (arglist);
7945
7946 arg_count++;
7947 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
7948 op[arg_count] = expand_normal (arg);
7949
7950 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
7951 mode[arg_count]))
7952 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
7953 }
7954
7955 switch (arg_count)
7956 {
7957 case 1:
7958 pat = GEN_FCN (icode) (op[0], op[1]);
7959 break;
7960 case 2:
7961 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
7962 break;
7963 case 3:
7964 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
7965 break;
7966 default:
7967 gcc_unreachable ();
7968 }
7969
7970 if (!pat)
7971 return NULL_RTX;
7972
7973 emit_insn (pat);
7974
7975 return op[0];
7976 }
7977
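/* Subroutine of sparc_handle_vis_mul8x16 below: compute the product of
   an 8-bit element E8 and a 16-bit element E16, scaled down by 256 with
   rounding to nearest, mirroring what the fmul8x16 family of VIS
   instructions computes.  E.g. sparc_vis_mul8x16 (100, 300)
   == (100 * 300 + 128) / 256 == 117.  */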
7978 static int
7979 sparc_vis_mul8x16 (int e8, int e16)
7980 {
7981 return (e8 * e16 + 128) / 256;
7982 }
7983
7984 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as
7985 specified by FNCODE.  All elements in the ELTS0 and ELTS1 lists must be
7986 integer constants.  A tree list with the results of the multiplications
7987 is returned, and each element in the list is of INNER_TYPE.  */
7988
7989 static tree
7990 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
7991 {
7992 tree n_elts = NULL_TREE;
7993 int scale;
7994
7995 switch (fncode)
7996 {
7997 case CODE_FOR_fmul8x16_vis:
7998 for (; elts0 && elts1;
7999 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8000 {
8001 int val
8002 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8003 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8004 n_elts = tree_cons (NULL_TREE,
8005 build_int_cst (inner_type, val),
8006 n_elts);
8007 }
8008 break;
8009
8010 case CODE_FOR_fmul8x16au_vis:
8011 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8012
8013 for (; elts0; elts0 = TREE_CHAIN (elts0))
8014 {
8015 int val
8016 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8017 scale);
8018 n_elts = tree_cons (NULL_TREE,
8019 build_int_cst (inner_type, val),
8020 n_elts);
8021 }
8022 break;
8023
8024 case CODE_FOR_fmul8x16al_vis:
8025 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8026
8027 for (; elts0; elts0 = TREE_CHAIN (elts0))
8028 {
8029 int val
8030 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8031 scale);
8032 n_elts = tree_cons (NULL_TREE,
8033 build_int_cst (inner_type, val),
8034 n_elts);
8035 }
8036 break;
8037
8038 default:
8039 gcc_unreachable ();
8040 }
8041
8042 return nreverse (n_elts);
8043 }
8044
8045 /* Handle TARGET_FOLD_BUILTIN target hook.
8046 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8047 result of the function call is ignored. NULL_TREE is returned if the
8048 function could not be folded. */
8049
8050 static tree
8051 sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
8052 {
8053 tree arg0, arg1, arg2;
8054 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8056
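/* The alignaddr builtins are exempted below, apparently because they
   set the %gsr alignment field as a side effect and so must be expanded
   even when their value is unused.  */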
8057 if (ignore && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
8058 && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
8059 return build_int_cst (rtype, 0);
8060
8061 switch (DECL_FUNCTION_CODE (fndecl))
8062 {
8063 case CODE_FOR_fexpand_vis:
8064 arg0 = TREE_VALUE (arglist);
8065 STRIP_NOPS (arg0);
8066
8067 if (TREE_CODE (arg0) == VECTOR_CST)
8068 {
8069 tree inner_type = TREE_TYPE (rtype);
8070 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8071 tree n_elts = NULL_TREE;
8072
8073 for (; elts; elts = TREE_CHAIN (elts))
8074 {
8075 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8076 n_elts = tree_cons (NULL_TREE,
8077 build_int_cst (inner_type, val),
8078 n_elts);
8079 }
8080 return build_vector (rtype, nreverse (n_elts));
8081 }
8082 break;
8083
8084 case CODE_FOR_fmul8x16_vis:
8085 case CODE_FOR_fmul8x16au_vis:
8086 case CODE_FOR_fmul8x16al_vis:
8087 arg0 = TREE_VALUE (arglist);
8088 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8089 STRIP_NOPS (arg0);
8090 STRIP_NOPS (arg1);
8091
8092 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8093 {
8094 tree inner_type = TREE_TYPE (rtype);
8095 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8096 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8097 tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
8098 inner_type, elts0, elts1);
8099
8100 return build_vector (rtype, n_elts);
8101 }
8102 break;
8103
8104 case CODE_FOR_fpmerge_vis:
8105 arg0 = TREE_VALUE (arglist);
8106 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8107 STRIP_NOPS (arg0);
8108 STRIP_NOPS (arg1);
8109
8110 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8111 {
8112 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8113 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8114 tree n_elts = NULL_TREE;
8115
8116 for (; elts0 && elts1;
8117 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8118 {
8119 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8120 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8121 }
8122
8123 return build_vector (rtype, nreverse (n_elts));
8124 }
8125 break;
8126
8127 case CODE_FOR_pdist_vis:
8128 arg0 = TREE_VALUE (arglist);
8129 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8130 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8131 STRIP_NOPS (arg0);
8132 STRIP_NOPS (arg1);
8133 STRIP_NOPS (arg2);
8134
8135 if (TREE_CODE (arg0) == VECTOR_CST
8136 && TREE_CODE (arg1) == VECTOR_CST
8137 && TREE_CODE (arg2) == INTEGER_CST)
8138 {
8139 int overflow = 0;
8140 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8141 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8142 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8143 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8144
8145 for (; elts0 && elts1;
8146 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8147 {
8148 unsigned HOST_WIDE_INT
8149 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8150 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8151 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8152 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8153
8154 unsigned HOST_WIDE_INT l;
8155 HOST_WIDE_INT h;
8156
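/* Accumulate the absolute difference |elt0 - elt1| into low:high,
   i.e. the sum of absolute differences that pdist computes.  */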
8157 overflow |= neg_double (low1, high1, &l, &h);
8158 overflow |= add_double (low0, high0, l, h, &l, &h);
8159 if (h < 0)
8160 overflow |= neg_double (l, h, &l, &h);
8161
8162 overflow |= add_double (low, high, l, h, &low, &high);
8163 }
8164
8165 gcc_assert (overflow == 0);
8166
8167 return build_int_cst_wide (rtype, low, high);
8168 }
8169
8170 default:
8171 break;
8172 }
8173 return NULL_TREE;
8174 }
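/* As an illustration of the folding above, __builtin_vis_fexpand
   applied to the constant vector {1, 2, 3, 4} folds at compile time to
   the v4hi constant {16, 32, 48, 64} (each element shifted left by 4),
   and no fexpand instruction is emitted.  */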
8175 \f
8176 int
8177 sparc_extra_constraint_check (rtx op, int c, int strict)
8178 {
8179 int reload_ok_mem;
8180
8181 if (TARGET_ARCH64
8182 && (c == 'T' || c == 'U'))
8183 return 0;
8184
8185 switch (c)
8186 {
8187 case 'Q':
8188 return fp_sethi_p (op);
8189
8190 case 'R':
8191 return fp_mov_p (op);
8192
8193 case 'S':
8194 return fp_high_losum_p (op);
8195
8196 case 'U':
8197 if (! strict
8198 || (GET_CODE (op) == REG
8199 && (REGNO (op) < FIRST_PSEUDO_REGISTER
8200 || reg_renumber[REGNO (op)] >= 0)))
8201 return register_ok_for_ldd (op);
8202
8203 return 0;
8204
8205 case 'W':
8206 case 'T':
8207 break;
8208
8209 case 'Y':
8210 return const_zero_operand (op, GET_MODE (op));
8211
8212 default:
8213 return 0;
8214 }
8215
8216 /* Our memory extra constraints have to emulate the
8217 behavior of 'm' and 'o' in order for reload to work
8218 correctly. */
8219 if (GET_CODE (op) == MEM)
8220 {
8221 reload_ok_mem = 0;
8222 if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
8223 && (! strict
8224 || strict_memory_address_p (Pmode, XEXP (op, 0))))
8225 reload_ok_mem = 1;
8226 }
8227 else
8228 {
8229 reload_ok_mem = (reload_in_progress
8230 && GET_CODE (op) == REG
8231 && REGNO (op) >= FIRST_PSEUDO_REGISTER
8232 && reg_renumber [REGNO (op)] < 0);
8233 }
8234
8235 return reload_ok_mem;
8236 }
8237
8238 /* ??? This duplicates information provided to the compiler by the
8239 ??? scheduler description. Some day, teach genautomata to output
8240 ??? the latencies and then CSE will just use that. */
8241
8242 static bool
8243 sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
8244 {
8245 enum machine_mode mode = GET_MODE (x);
8246 bool float_mode_p = FLOAT_MODE_P (mode);
8247
8248 switch (code)
8249 {
8250 case CONST_INT:
8251 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8252 {
8253 *total = 0;
8254 return true;
8255 }
8256 /* FALLTHRU */
8257
8258 case HIGH:
8259 *total = 2;
8260 return true;
8261
8262 case CONST:
8263 case LABEL_REF:
8264 case SYMBOL_REF:
8265 *total = 4;
8266 return true;
8267
8268 case CONST_DOUBLE:
8269 if (GET_MODE (x) == VOIDmode
8270 && ((CONST_DOUBLE_HIGH (x) == 0
8271 && CONST_DOUBLE_LOW (x) < 0x1000)
8272 || (CONST_DOUBLE_HIGH (x) == -1
8273 && CONST_DOUBLE_LOW (x) < 0
8274 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8275 *total = 0;
8276 else
8277 *total = 8;
8278 return true;
8279
8280 case MEM:
8281 /* If outer-code was a sign or zero extension, a cost
8282 of COSTS_N_INSNS (1) was already added in. This is
8283 why we are subtracting it back out. */
8284 if (outer_code == ZERO_EXTEND)
8285 {
8286 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8287 }
8288 else if (outer_code == SIGN_EXTEND)
8289 {
8290 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8291 }
8292 else if (float_mode_p)
8293 {
8294 *total = sparc_costs->float_load;
8295 }
8296 else
8297 {
8298 *total = sparc_costs->int_load;
8299 }
8300
8301 return true;
8302
8303 case PLUS:
8304 case MINUS:
8305 if (float_mode_p)
8306 *total = sparc_costs->float_plusminus;
8307 else
8308 *total = COSTS_N_INSNS (1);
8309 return false;
8310
8311 case MULT:
8312 if (float_mode_p)
8313 *total = sparc_costs->float_mul;
8314 else if (! TARGET_HARD_MUL)
8315 *total = COSTS_N_INSNS (25);
8316 else
8317 {
8318 int bit_cost;
8319
8320 bit_cost = 0;
8321 if (sparc_costs->int_mul_bit_factor)
8322 {
8323 int nbits;
8324
8325 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
8326 {
8327 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
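/* Count the set bits of the constant multiplier;
   value &= value - 1 clears the lowest set bit each iteration.  */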
8328 for (nbits = 0; value != 0; value &= value - 1)
8329 nbits++;
8330 }
8331 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
8332 && GET_MODE (XEXP (x, 1)) == VOIDmode)
8333 {
8334 rtx x1 = XEXP (x, 1);
8335 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
8336 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
8337
8338 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
8339 nbits++;
8340 for (; value2 != 0; value2 &= value2 - 1)
8341 nbits++;
8342 }
8343 else
8344 nbits = 7;
8345
8346 if (nbits < 3)
8347 nbits = 3;
8348 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
8349 bit_cost = COSTS_N_INSNS (bit_cost);
8350 }
8351
8352 if (mode == DImode)
8353 *total = sparc_costs->int_mulX + bit_cost;
8354 else
8355 *total = sparc_costs->int_mul + bit_cost;
8356 }
8357 return false;
8358
8359 case ASHIFT:
8360 case ASHIFTRT:
8361 case LSHIFTRT:
8362 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
8363 return false;
8364
8365 case DIV:
8366 case UDIV:
8367 case MOD:
8368 case UMOD:
8369 if (float_mode_p)
8370 {
8371 if (mode == DFmode)
8372 *total = sparc_costs->float_div_df;
8373 else
8374 *total = sparc_costs->float_div_sf;
8375 }
8376 else
8377 {
8378 if (mode == DImode)
8379 *total = sparc_costs->int_divX;
8380 else
8381 *total = sparc_costs->int_div;
8382 }
8383 return false;
8384
8385 case NEG:
8386 if (! float_mode_p)
8387 {
8388 *total = COSTS_N_INSNS (1);
8389 return false;
8390 }
8391 /* FALLTHRU */
8392
8393 case ABS:
8394 case FLOAT:
8395 case UNSIGNED_FLOAT:
8396 case FIX:
8397 case UNSIGNED_FIX:
8398 case FLOAT_EXTEND:
8399 case FLOAT_TRUNCATE:
8400 *total = sparc_costs->float_move;
8401 return false;
8402
8403 case SQRT:
8404 if (mode == DFmode)
8405 *total = sparc_costs->float_sqrt_df;
8406 else
8407 *total = sparc_costs->float_sqrt_sf;
8408 return false;
8409
8410 case COMPARE:
8411 if (float_mode_p)
8412 *total = sparc_costs->float_cmp;
8413 else
8414 *total = COSTS_N_INSNS (1);
8415 return false;
8416
8417 case IF_THEN_ELSE:
8418 if (float_mode_p)
8419 *total = sparc_costs->float_cmove;
8420 else
8421 *total = sparc_costs->int_cmove;
8422 return false;
8423
8424 case IOR:
8425 /* Handle the NAND vector patterns. */
8426 if (sparc_vector_mode_supported_p (GET_MODE (x))
8427 && GET_CODE (XEXP (x, 0)) == NOT
8428 && GET_CODE (XEXP (x, 1)) == NOT)
8429 {
8430 *total = COSTS_N_INSNS (1);
8431 return true;
8432 }
8433 else
8434 return false;
8435
8436 default:
8437 return false;
8438 }
8439 }
8440
8441 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.  */
8442
8443 static void
8444 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
8445 {
8446 /* STACK_BOUNDARY guarantees that this is a 2-word slot. */
8447 rtx slot = gen_rtx_MEM (word_mode,
8448 plus_constant (stack_pointer_rtx, SPARC_STACK_BIAS));
8449
8450 emit_insn (gen_stack_pointer_dec (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8451 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
8452 if (reg2)
8453 emit_insn (gen_rtx_SET (VOIDmode,
8454 adjust_address (slot, word_mode, UNITS_PER_WORD),
8455 reg2));
8456 emit_insn (seq);
8457 if (reg2)
8458 emit_insn (gen_rtx_SET (VOIDmode,
8459 reg2,
8460 adjust_address (slot, word_mode, UNITS_PER_WORD)));
8461 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
8462 emit_insn (gen_stack_pointer_inc (GEN_INT (STACK_BOUNDARY/BITS_PER_UNIT)));
8463 }
8464
8465 /* Output the assembler code for a thunk function. THUNK_DECL is the
8466 declaration for the thunk function itself, FUNCTION is the decl for
8467 the target function. DELTA is an immediate constant offset to be
8468 added to THIS. If VCALL_OFFSET is nonzero, the word at address
8469 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
8470
8471 static void
8472 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8473 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8474 tree function)
8475 {
8476 rtx this, insn, funexp;
8477 unsigned int int_arg_first;
8478
8479 reload_completed = 1;
8480 epilogue_completed = 1;
8481 no_new_pseudos = 1;
8482 reset_block_changes ();
8483
8484 emit_note (NOTE_INSN_PROLOGUE_END);
8485
8486 if (flag_delayed_branch)
8487 {
8488 /* We will emit a regular sibcall below, so we need to instruct
8489 output_sibcall that we are in a leaf function. */
8490 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
8491
8492 /* This will cause final.c to invoke leaf_renumber_regs so we
8493 must behave as if we were in a not-yet-leafified function. */
8494 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
8495 }
8496 else
8497 {
8498 /* We will emit the sibcall manually below, so we will need to
8499 manually spill non-leaf registers. */
8500 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
8501
8502 /* We really are in a leaf function. */
8503 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
8504 }
8505
8506 /* Find the "this" pointer.  Normally in %o0, but in ARCH64, if the function
8507 returns a structure, the structure return pointer takes %o0 and "this" is in the next register instead.  */
8508 if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8509 this = gen_rtx_REG (Pmode, int_arg_first + 1);
8510 else
8511 this = gen_rtx_REG (Pmode, int_arg_first);
8512
8513 /* Add DELTA. When possible use a plain add, otherwise load it into
8514 a register first. */
8515 if (delta)
8516 {
8517 rtx delta_rtx = GEN_INT (delta);
8518
8519 if (! SPARC_SIMM13_P (delta))
8520 {
8521 rtx scratch = gen_rtx_REG (Pmode, 1);
8522 emit_move_insn (scratch, delta_rtx);
8523 delta_rtx = scratch;
8524 }
8525
8526 /* THIS += DELTA. */
8527 emit_insn (gen_add2_insn (this, delta_rtx));
8528 }
8529
8530 /* Add the word at address (*THIS + VCALL_OFFSET). */
8531 if (vcall_offset)
8532 {
8533 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8534 rtx scratch = gen_rtx_REG (Pmode, 1);
8535
8536 gcc_assert (vcall_offset < 0);
8537
8538 /* SCRATCH = *THIS. */
8539 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));
8540
8541 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
8542 may not have any available scratch register at this point. */
8543 if (SPARC_SIMM13_P (vcall_offset))
8544 ;
8545 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
8546 else if (! fixed_regs[5]
8547 /* The below sequence is made up of at least 2 insns,
8548 while the default method may need only one. */
8549 && vcall_offset < -8192)
8550 {
8551 rtx scratch2 = gen_rtx_REG (Pmode, 5);
8552 emit_move_insn (scratch2, vcall_offset_rtx);
8553 vcall_offset_rtx = scratch2;
8554 }
8555 else
8556 {
8557 rtx increment = GEN_INT (-4096);
8558
8559 /* VCALL_OFFSET is a negative number whose typical range can be
8560 estimated as -32768..0 in 32-bit mode.  In almost all cases
8561 it is therefore cheaper to emit multiple add insns than to
8562 spill and load the constant into a register (at least
8563 6 insns).  */
8564 while (! SPARC_SIMM13_P (vcall_offset))
8565 {
8566 emit_insn (gen_add2_insn (scratch, increment));
8567 vcall_offset += 4096;
8568 }
8569 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
8570 }
8571
8572 /* SCRATCH = *(*THIS + VCALL_OFFSET). */
8573 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
8574 gen_rtx_PLUS (Pmode,
8575 scratch,
8576 vcall_offset_rtx)));
8577
8578 /* THIS += *(*THIS + VCALL_OFFSET). */
8579 emit_insn (gen_add2_insn (this, scratch));
8580 }
8581
8582 /* Generate a tail call to the target function. */
8583 if (! TREE_USED (function))
8584 {
8585 assemble_external (function);
8586 TREE_USED (function) = 1;
8587 }
8588 funexp = XEXP (DECL_RTL (function), 0);
8589
8590 if (flag_delayed_branch)
8591 {
8592 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8593 insn = emit_call_insn (gen_sibcall (funexp));
8594 SIBLING_CALL_P (insn) = 1;
8595 }
8596 else
8597 {
8598 /* The hoops we have to jump through in order to generate a sibcall
8599 without using delay slots... */
8600 rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);
8601
8602 if (flag_pic)
8603 {
8604 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
8605 spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
8606 start_sequence ();
8607 /* Delay emitting the PIC helper function because it needs to
8608 change the section and we are emitting assembly code. */
8609 load_pic_register (true); /* clobbers %o7 */
8610 scratch = legitimize_pic_address (funexp, Pmode, scratch);
8611 seq = get_insns ();
8612 end_sequence ();
8613 emit_and_preserve (seq, spill_reg, spill_reg2);
8614 }
8615 else if (TARGET_ARCH32)
8616 {
8617 emit_insn (gen_rtx_SET (VOIDmode,
8618 scratch,
8619 gen_rtx_HIGH (SImode, funexp)));
8620 emit_insn (gen_rtx_SET (VOIDmode,
8621 scratch,
8622 gen_rtx_LO_SUM (SImode, scratch, funexp)));
8623 }
8624 else /* TARGET_ARCH64 */
8625 {
8626 switch (sparc_cmodel)
8627 {
8628 case CM_MEDLOW:
8629 case CM_MEDMID:
8630 /* The destination can serve as a temporary. */
8631 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
8632 break;
8633
8634 case CM_MEDANY:
8635 case CM_EMBMEDANY:
8636 /* The destination cannot serve as a temporary. */
8637 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
8638 start_sequence ();
8639 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
8640 seq = get_insns ();
8641 end_sequence ();
8642 emit_and_preserve (seq, spill_reg, 0);
8643 break;
8644
8645 default:
8646 gcc_unreachable ();
8647 }
8648 }
8649
8650 emit_jump_insn (gen_indirect_jump (scratch));
8651 }
8652
8653 emit_barrier ();
8654
8655 /* Run just enough of rest_of_compilation to get the insns emitted.
8656 There's not really enough bulk here to make other passes such as
8657 instruction scheduling worthwhile.  Note that use_thunk calls
8658 assemble_start_function and assemble_end_function. */
8659 insn = get_insns ();
8660 insn_locators_initialize ();
8661 shorten_branches (insn);
8662 final_start_function (insn, file, 1);
8663 final (insn, file, 1);
8664 final_end_function ();
8665
8666 reload_completed = 0;
8667 epilogue_completed = 0;
8668 no_new_pseudos = 0;
8669 }
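/* Schematically, the thunk emitted above computes, in C-like terms
   (a sketch of the effect, not of the actual rtl):

	this += delta;
	if (vcall_offset)	// always negative here
	  this += *(long *) (*(char **) this + vcall_offset);
	goto function;		// tail call

   with everything kept in hard registers: %o0 or %o1 for THIS, and
   %g1, possibly %g5, as scratches.  */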
8670
8671 /* Return true if sparc_output_mi_thunk would be able to output the
8672 assembler code for the thunk function specified by the arguments
8673 it is passed, and false otherwise. */
8674 static bool
8675 sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
8676 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
8677 HOST_WIDE_INT vcall_offset,
8678 tree function ATTRIBUTE_UNUSED)
8679 {
8680 /* Bound the loop used in the default method above. */
8681 return (vcall_offset >= -32768 || ! fixed_regs[5]);
8682 }
8683
8684 /* How to allocate a 'struct machine_function'. */
8685
8686 static struct machine_function *
8687 sparc_init_machine_status (void)
8688 {
8689 return ggc_alloc_cleared (sizeof (struct machine_function));
8690 }
8691
8692 /* Locate some local-dynamic symbol still in use by this function
8693 so that we can print its name in local-dynamic base patterns. */
8694
8695 static const char *
8696 get_some_local_dynamic_name (void)
8697 {
8698 rtx insn;
8699
8700 if (cfun->machine->some_ld_name)
8701 return cfun->machine->some_ld_name;
8702
8703 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
8704 if (INSN_P (insn)
8705 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
8706 return cfun->machine->some_ld_name;
8707
8708 gcc_unreachable ();
8709 }
8710
8711 static int
8712 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
8713 {
8714 rtx x = *px;
8715
8716 if (x
8717 && GET_CODE (x) == SYMBOL_REF
8718 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
8719 {
8720 cfun->machine->some_ld_name = XSTR (x, 0);
8721 return 1;
8722 }
8723
8724 return 0;
8725 }
8726
8727 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
8728 This is called from dwarf2out.c to emit call frame instructions
8729 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
8730 static void
8731 sparc_dwarf_handle_frame_unspec (const char *label,
8732 rtx pattern ATTRIBUTE_UNUSED,
8733 int index ATTRIBUTE_UNUSED)
8734 {
8735 gcc_assert (index == UNSPECV_SAVEW);
8736 dwarf2out_window_save (label);
8737 }
8738
8739 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8740 We need to emit DTP-relative relocations. */
8741
8742 static void
8743 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
8744 {
8745 switch (size)
8746 {
8747 case 4:
8748 fputs ("\t.word\t%r_tls_dtpoff32(", file);
8749 break;
8750 case 8:
8751 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
8752 break;
8753 default:
8754 gcc_unreachable ();
8755 }
8756 output_addr_const (file, x);
8757 fputs (")", file);
8758 }
8759
8760 /* Do whatever processing is required at the end of a file. */
8761
8762 static void
8763 sparc_file_end (void)
8764 {
8765 /* If we haven't emitted the special PIC helper function, do so now. */
8766 if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
8767 emit_pic_helper ();
8768
8769 if (NEED_INDICATE_EXEC_STACK)
8770 file_end_indicate_exec_stack ();
8771 }
8772
8773 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
8774 /* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE. */
8775
8776 static const char *
8777 sparc_mangle_fundamental_type (tree type)
8778 {
8779 if (!TARGET_64BIT
8780 && TYPE_MAIN_VARIANT (type) == long_double_type_node
8781 && TARGET_LONG_DOUBLE_128)
8782 return "g";
8783
8784 /* For all other types, use normal C++ mangling. */
8785 return NULL;
8786 }
8787 #endif
8788
8789 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing
8790 a 32-bit compare and swap on the word containing the byte or half-word.  */
8791
8792 void
8793 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
8794 {
8795 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
8796 rtx addr = gen_reg_rtx (Pmode);
8797 rtx off = gen_reg_rtx (SImode);
8798 rtx oldv = gen_reg_rtx (SImode);
8799 rtx newv = gen_reg_rtx (SImode);
8800 rtx oldvalue = gen_reg_rtx (SImode);
8801 rtx newvalue = gen_reg_rtx (SImode);
8802 rtx res = gen_reg_rtx (SImode);
8803 rtx resv = gen_reg_rtx (SImode);
8804 rtx memsi, val, mask, end_label, loop_label, cc;
8805
8806 emit_insn (gen_rtx_SET (VOIDmode, addr,
8807 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
8808
8809 if (Pmode != SImode)
8810 addr1 = gen_lowpart (SImode, addr1);
8811 emit_insn (gen_rtx_SET (VOIDmode, off,
8812 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
8813
8814 memsi = gen_rtx_MEM (SImode, addr);
8815 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
8816 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
8817
8818 val = force_reg (SImode, memsi);
8819
8820 emit_insn (gen_rtx_SET (VOIDmode, off,
8821 gen_rtx_XOR (SImode, off,
8822 GEN_INT (GET_MODE (mem) == QImode
8823 ? 3 : 2))));
8824
8825 emit_insn (gen_rtx_SET (VOIDmode, off,
8826 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
8827
8828 if (GET_MODE (mem) == QImode)
8829 mask = force_reg (SImode, GEN_INT (0xff));
8830 else
8831 mask = force_reg (SImode, GEN_INT (0xffff));
8832
8833 emit_insn (gen_rtx_SET (VOIDmode, mask,
8834 gen_rtx_ASHIFT (SImode, mask, off)));
8835
8836 emit_insn (gen_rtx_SET (VOIDmode, val,
8837 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8838 val)));
8839
8840 oldval = gen_lowpart (SImode, oldval);
8841 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8842 gen_rtx_ASHIFT (SImode, oldval, off)));
8843
8844 newval = gen_lowpart_common (SImode, newval);
8845 emit_insn (gen_rtx_SET (VOIDmode, newv,
8846 gen_rtx_ASHIFT (SImode, newval, off)));
8847
8848 emit_insn (gen_rtx_SET (VOIDmode, oldv,
8849 gen_rtx_AND (SImode, oldv, mask)));
8850
8851 emit_insn (gen_rtx_SET (VOIDmode, newv,
8852 gen_rtx_AND (SImode, newv, mask)));
8853
8854 end_label = gen_label_rtx ();
8855 loop_label = gen_label_rtx ();
8856 emit_label (loop_label);
8857
8858 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
8859 gen_rtx_IOR (SImode, oldv, val)));
8860
8861 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
8862 gen_rtx_IOR (SImode, newv, val)));
8863
8864 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
8865
8866 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
8867
8868 emit_insn (gen_rtx_SET (VOIDmode, resv,
8869 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
8870 res)));
8871
8872 sparc_compare_op0 = resv;
8873 sparc_compare_op1 = val;
8874 cc = gen_compare_reg (NE);
8875
8876 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
8877
8878 sparc_compare_emitted = cc;
8879 emit_jump_insn (gen_bne (loop_label));
8880
8881 emit_label (end_label);
8882
8883 emit_insn (gen_rtx_SET (VOIDmode, res,
8884 gen_rtx_AND (SImode, res, mask)));
8885
8886 emit_insn (gen_rtx_SET (VOIDmode, res,
8887 gen_rtx_LSHIFTRT (SImode, res, off)));
8888
8889 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
8890 }
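/* In outline, the rtl emitted above implements the following loop,
   shown as C-like pseudo-code (a sketch only; CAS32 stands for the
   sync_compare_and_swapsi pattern, and the shift computation reflects
   SPARC's big-endian byte numbering):

	uint32_t *wordp = (uint32_t *) (addr & ~3);
	int shift = ((addr & 3) ^ (byte_access ? 3 : 2)) * 8;
	uint32_t mask = (byte_access ? 0xff : 0xffff) << shift;
	uint32_t bg = *wordp & ~mask;	// bits surrounding the field
	uint32_t oldw = ((uint32_t) oldval << shift) & mask;
	uint32_t neww = ((uint32_t) newval << shift) & mask;
	uint32_t res;

	for (;;)
	  {
	    res = CAS32 (wordp, bg | oldw, bg | neww);
	    if (res == (bg | oldw))
	      break;			// swap performed
	    if ((res & ~mask) == bg)
	      break;			// field differed: give up
	    bg = res & ~mask;		// surroundings changed: retry
	  }
	result = (res & mask) >> shift;  */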
8891
8892 #include "gt-sparc.h"