1 /* Subroutines for insn-output.c for SPARC.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011
5 Free Software Foundation, Inc.
6 Contributed by Michael Tiemann (tiemann@cygnus.com)
7 64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
8 at Cygnus Support.
9
10 This file is part of GCC.
11
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
15 any later version.
16
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include "tree.h"
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "insn-config.h"
35 #include "insn-codes.h"
36 #include "conditions.h"
37 #include "output.h"
38 #include "insn-attr.h"
39 #include "flags.h"
40 #include "function.h"
41 #include "except.h"
42 #include "expr.h"
43 #include "optabs.h"
44 #include "recog.h"
45 #include "diagnostic-core.h"
46 #include "ggc.h"
47 #include "tm_p.h"
48 #include "debug.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "common/common-target.h"
52 #include "cfglayout.h"
53 #include "gimple.h"
54 #include "langhooks.h"
55 #include "reload.h"
56 #include "params.h"
57 #include "df.h"
58 #include "dwarf2out.h"
59 #include "opts.h"
60
61 /* Processor costs */
62 static const
63 struct processor_costs cypress_costs = {
64 COSTS_N_INSNS (2), /* int load */
65 COSTS_N_INSNS (2), /* int signed load */
66 COSTS_N_INSNS (2), /* int zeroed load */
67 COSTS_N_INSNS (2), /* float load */
68 COSTS_N_INSNS (5), /* fmov, fneg, fabs */
69 COSTS_N_INSNS (5), /* fadd, fsub */
70 COSTS_N_INSNS (1), /* fcmp */
71 COSTS_N_INSNS (1), /* fmov, fmovr */
72 COSTS_N_INSNS (7), /* fmul */
73 COSTS_N_INSNS (37), /* fdivs */
74 COSTS_N_INSNS (37), /* fdivd */
75 COSTS_N_INSNS (63), /* fsqrts */
76 COSTS_N_INSNS (63), /* fsqrtd */
77 COSTS_N_INSNS (1), /* imul */
78 COSTS_N_INSNS (1), /* imulX */
79 0, /* imul bit factor */
80 COSTS_N_INSNS (1), /* idiv */
81 COSTS_N_INSNS (1), /* idivX */
82 COSTS_N_INSNS (1), /* movcc/movr */
83 0, /* shift penalty */
84 };
85
86 static const
87 struct processor_costs supersparc_costs = {
88 COSTS_N_INSNS (1), /* int load */
89 COSTS_N_INSNS (1), /* int signed load */
90 COSTS_N_INSNS (1), /* int zeroed load */
91 COSTS_N_INSNS (0), /* float load */
92 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
93 COSTS_N_INSNS (3), /* fadd, fsub */
94 COSTS_N_INSNS (3), /* fcmp */
95 COSTS_N_INSNS (1), /* fmov, fmovr */
96 COSTS_N_INSNS (3), /* fmul */
97 COSTS_N_INSNS (6), /* fdivs */
98 COSTS_N_INSNS (9), /* fdivd */
99 COSTS_N_INSNS (12), /* fsqrts */
100 COSTS_N_INSNS (12), /* fsqrtd */
101 COSTS_N_INSNS (4), /* imul */
102 COSTS_N_INSNS (4), /* imulX */
103 0, /* imul bit factor */
104 COSTS_N_INSNS (4), /* idiv */
105 COSTS_N_INSNS (4), /* idivX */
106 COSTS_N_INSNS (1), /* movcc/movr */
107 1, /* shift penalty */
108 };
109
110 static const
111 struct processor_costs hypersparc_costs = {
112 COSTS_N_INSNS (1), /* int load */
113 COSTS_N_INSNS (1), /* int signed load */
114 COSTS_N_INSNS (1), /* int zeroed load */
115 COSTS_N_INSNS (1), /* float load */
116 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
117 COSTS_N_INSNS (1), /* fadd, fsub */
118 COSTS_N_INSNS (1), /* fcmp */
119 COSTS_N_INSNS (1), /* fmov, fmovr */
120 COSTS_N_INSNS (1), /* fmul */
121 COSTS_N_INSNS (8), /* fdivs */
122 COSTS_N_INSNS (12), /* fdivd */
123 COSTS_N_INSNS (17), /* fsqrts */
124 COSTS_N_INSNS (17), /* fsqrtd */
125 COSTS_N_INSNS (17), /* imul */
126 COSTS_N_INSNS (17), /* imulX */
127 0, /* imul bit factor */
128 COSTS_N_INSNS (17), /* idiv */
129 COSTS_N_INSNS (17), /* idivX */
130 COSTS_N_INSNS (1), /* movcc/movr */
131 0, /* shift penalty */
132 };
133
134 static const
135 struct processor_costs leon_costs = {
136 COSTS_N_INSNS (1), /* int load */
137 COSTS_N_INSNS (1), /* int signed load */
138 COSTS_N_INSNS (1), /* int zeroed load */
139 COSTS_N_INSNS (1), /* float load */
140 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
141 COSTS_N_INSNS (1), /* fadd, fsub */
142 COSTS_N_INSNS (1), /* fcmp */
143 COSTS_N_INSNS (1), /* fmov, fmovr */
144 COSTS_N_INSNS (1), /* fmul */
145 COSTS_N_INSNS (15), /* fdivs */
146 COSTS_N_INSNS (15), /* fdivd */
147 COSTS_N_INSNS (23), /* fsqrts */
148 COSTS_N_INSNS (23), /* fsqrtd */
149 COSTS_N_INSNS (5), /* imul */
150 COSTS_N_INSNS (5), /* imulX */
151 0, /* imul bit factor */
152 COSTS_N_INSNS (5), /* idiv */
153 COSTS_N_INSNS (5), /* idivX */
154 COSTS_N_INSNS (1), /* movcc/movr */
155 0, /* shift penalty */
156 };
157
158 static const
159 struct processor_costs sparclet_costs = {
160 COSTS_N_INSNS (3), /* int load */
161 COSTS_N_INSNS (3), /* int signed load */
162 COSTS_N_INSNS (1), /* int zeroed load */
163 COSTS_N_INSNS (1), /* float load */
164 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
165 COSTS_N_INSNS (1), /* fadd, fsub */
166 COSTS_N_INSNS (1), /* fcmp */
167 COSTS_N_INSNS (1), /* fmov, fmovr */
168 COSTS_N_INSNS (1), /* fmul */
169 COSTS_N_INSNS (1), /* fdivs */
170 COSTS_N_INSNS (1), /* fdivd */
171 COSTS_N_INSNS (1), /* fsqrts */
172 COSTS_N_INSNS (1), /* fsqrtd */
173 COSTS_N_INSNS (5), /* imul */
174 COSTS_N_INSNS (5), /* imulX */
175 0, /* imul bit factor */
176 COSTS_N_INSNS (5), /* idiv */
177 COSTS_N_INSNS (5), /* idivX */
178 COSTS_N_INSNS (1), /* movcc/movr */
179 0, /* shift penalty */
180 };
181
182 static const
183 struct processor_costs ultrasparc_costs = {
184 COSTS_N_INSNS (2), /* int load */
185 COSTS_N_INSNS (3), /* int signed load */
186 COSTS_N_INSNS (2), /* int zeroed load */
187 COSTS_N_INSNS (2), /* float load */
188 COSTS_N_INSNS (1), /* fmov, fneg, fabs */
189 COSTS_N_INSNS (4), /* fadd, fsub */
190 COSTS_N_INSNS (1), /* fcmp */
191 COSTS_N_INSNS (2), /* fmov, fmovr */
192 COSTS_N_INSNS (4), /* fmul */
193 COSTS_N_INSNS (13), /* fdivs */
194 COSTS_N_INSNS (23), /* fdivd */
195 COSTS_N_INSNS (13), /* fsqrts */
196 COSTS_N_INSNS (23), /* fsqrtd */
197 COSTS_N_INSNS (4), /* imul */
198 COSTS_N_INSNS (4), /* imulX */
199 2, /* imul bit factor */
200 COSTS_N_INSNS (37), /* idiv */
201 COSTS_N_INSNS (68), /* idivX */
202 COSTS_N_INSNS (2), /* movcc/movr */
203 2, /* shift penalty */
204 };
205
206 static const
207 struct processor_costs ultrasparc3_costs = {
208 COSTS_N_INSNS (2), /* int load */
209 COSTS_N_INSNS (3), /* int signed load */
210 COSTS_N_INSNS (3), /* int zeroed load */
211 COSTS_N_INSNS (2), /* float load */
212 COSTS_N_INSNS (3), /* fmov, fneg, fabs */
213 COSTS_N_INSNS (4), /* fadd, fsub */
214 COSTS_N_INSNS (5), /* fcmp */
215 COSTS_N_INSNS (3), /* fmov, fmovr */
216 COSTS_N_INSNS (4), /* fmul */
217 COSTS_N_INSNS (17), /* fdivs */
218 COSTS_N_INSNS (20), /* fdivd */
219 COSTS_N_INSNS (20), /* fsqrts */
220 COSTS_N_INSNS (29), /* fsqrtd */
221 COSTS_N_INSNS (6), /* imul */
222 COSTS_N_INSNS (6), /* imulX */
223 0, /* imul bit factor */
224 COSTS_N_INSNS (40), /* idiv */
225 COSTS_N_INSNS (71), /* idivX */
226 COSTS_N_INSNS (2), /* movcc/movr */
227 0, /* shift penalty */
228 };
229
230 static const
231 struct processor_costs niagara_costs = {
232 COSTS_N_INSNS (3), /* int load */
233 COSTS_N_INSNS (3), /* int signed load */
234 COSTS_N_INSNS (3), /* int zeroed load */
235 COSTS_N_INSNS (9), /* float load */
236 COSTS_N_INSNS (8), /* fmov, fneg, fabs */
237 COSTS_N_INSNS (8), /* fadd, fsub */
238 COSTS_N_INSNS (26), /* fcmp */
239 COSTS_N_INSNS (8), /* fmov, fmovr */
240 COSTS_N_INSNS (29), /* fmul */
241 COSTS_N_INSNS (54), /* fdivs */
242 COSTS_N_INSNS (83), /* fdivd */
243 COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
244 COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
245 COSTS_N_INSNS (11), /* imul */
246 COSTS_N_INSNS (11), /* imulX */
247 0, /* imul bit factor */
248 COSTS_N_INSNS (72), /* idiv */
249 COSTS_N_INSNS (72), /* idivX */
250 COSTS_N_INSNS (1), /* movcc/movr */
251 0, /* shift penalty */
252 };
253
254 static const
255 struct processor_costs niagara2_costs = {
256 COSTS_N_INSNS (3), /* int load */
257 COSTS_N_INSNS (3), /* int signed load */
258 COSTS_N_INSNS (3), /* int zeroed load */
259 COSTS_N_INSNS (3), /* float load */
260 COSTS_N_INSNS (6), /* fmov, fneg, fabs */
261 COSTS_N_INSNS (6), /* fadd, fsub */
262 COSTS_N_INSNS (6), /* fcmp */
263 COSTS_N_INSNS (6), /* fmov, fmovr */
264 COSTS_N_INSNS (6), /* fmul */
265 COSTS_N_INSNS (19), /* fdivs */
266 COSTS_N_INSNS (33), /* fdivd */
267 COSTS_N_INSNS (19), /* fsqrts */
268 COSTS_N_INSNS (33), /* fsqrtd */
269 COSTS_N_INSNS (5), /* imul */
270 COSTS_N_INSNS (5), /* imulX */
271 0, /* imul bit factor */
272 COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
273 COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
274 COSTS_N_INSNS (1), /* movcc/movr */
275 0, /* shift penalty */
276 };
277
278 const struct processor_costs *sparc_costs = &cypress_costs;
279
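/* Illustrative aside (not part of the original file): how a cost table
   like the ones above is consulted.  The struct below is a hypothetical
   stand-in mirroring the comments; the real struct processor_costs is
   declared in the backend headers, and COSTS_N_INSNS is conventionally
   (N) * 4.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <stdio.h>

#define EXAMPLE_COSTS_N_INSNS(N) ((N) * 4)

struct example_costs {
  int int_load; /* integer load */
  int fdivd;    /* double-precision FP divide */
};

static const struct example_costs example_ultrasparc = {
  EXAMPLE_COSTS_N_INSNS (2),  /* int load, as in ultrasparc_costs */
  EXAMPLE_COSTS_N_INSNS (23), /* fdivd */
};

/* A cost hook compares relative costs to choose between sequences,
   e.g. whether replacing a divide with eight simple insns is a win.  */
static int divide_replacement_wins (const struct example_costs *c)
{
  return c->fdivd > 8 * EXAMPLE_COSTS_N_INSNS (1);
}

int main (void)
{
  printf ("%d\n", divide_replacement_wins (&example_ultrasparc)); /* 1 */
  return 0;
}
#endif
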
280 #ifdef HAVE_AS_RELAX_OPTION
281 /* If 'as' and 'ld' relax tail call insns into always-taken branches, always
282 use "or %o7,%g0,X; call Y; or X,%g0,%o7", so that it can be optimized.
283 With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out
284 whether anything branches between the sethi and the jmp. */
285 #define LEAF_SIBCALL_SLOT_RESERVED_P 1
286 #else
287 #define LEAF_SIBCALL_SLOT_RESERVED_P \
288 ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
289 #endif
290
291 /* Vector saying how input registers are mapped to output registers.
292 HARD_FRAME_POINTER_REGNUM cannot be remapped by this table to
293 eliminate it; you must use -fomit-frame-pointer to get that. */
294 char leaf_reg_remap[] =
295 { 0, 1, 2, 3, 4, 5, 6, 7,
296 -1, -1, -1, -1, -1, -1, 14, -1,
297 -1, -1, -1, -1, -1, -1, -1, -1,
298 8, 9, 10, 11, 12, 13, -1, 15,
299
300 32, 33, 34, 35, 36, 37, 38, 39,
301 40, 41, 42, 43, 44, 45, 46, 47,
302 48, 49, 50, 51, 52, 53, 54, 55,
303 56, 57, 58, 59, 60, 61, 62, 63,
304 64, 65, 66, 67, 68, 69, 70, 71,
305 72, 73, 74, 75, 76, 77, 78, 79,
306 80, 81, 82, 83, 84, 85, 86, 87,
307 88, 89, 90, 91, 92, 93, 94, 95,
308 96, 97, 98, 99, 100};
309
310 /* Vector, indexed by hard register number, which contains 1
311 for a register that is allowable in a candidate for leaf
312 function treatment. */
313 char sparc_leaf_regs[] =
314 { 1, 1, 1, 1, 1, 1, 1, 1,
315 0, 0, 0, 0, 0, 0, 1, 0,
316 0, 0, 0, 0, 0, 0, 0, 0,
317 1, 1, 1, 1, 1, 1, 0, 1,
318 1, 1, 1, 1, 1, 1, 1, 1,
319 1, 1, 1, 1, 1, 1, 1, 1,
320 1, 1, 1, 1, 1, 1, 1, 1,
321 1, 1, 1, 1, 1, 1, 1, 1,
322 1, 1, 1, 1, 1, 1, 1, 1,
323 1, 1, 1, 1, 1, 1, 1, 1,
324 1, 1, 1, 1, 1, 1, 1, 1,
325 1, 1, 1, 1, 1, 1, 1, 1,
326 1, 1, 1, 1, 1};
327
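/* Illustrative aside (not part of the original file): what the two
   tables above encode.  In a leaf function the register window is not
   shifted, so incoming %i registers (hard regs 24-31) are renamed to
   the caller's %o registers (hard regs 8-15), and a -1 entry marks a
   register a leaf function must not use.  A self-contained check over
   a local copy of the %i rows:  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <assert.h>

/* Local copy of the %i0-%i7 rows of leaf_reg_remap (hard regs 24-31).  */
static const signed char in_reg_remap[8] = { 8, 9, 10, 11, 12, 13, -1, 15 };

int main (void)
{
  /* %i0 (hard reg 24) becomes %o0 (hard reg 8) in a leaf function.  */
  assert (in_reg_remap[24 - 24] == 8);
  /* %i6 == %fp (hard reg 30) has no remapping: -1 blocks leaf treatment.  */
  assert (in_reg_remap[30 - 24] == -1);
  return 0;
}
#endif
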
328 struct GTY(()) machine_function
329 {
330 /* Size of the frame of the function. */
331 HOST_WIDE_INT frame_size;
332
333 /* Size of the frame of the function minus the register window save area
334 and the outgoing argument area. */
335 HOST_WIDE_INT apparent_frame_size;
336
337 /* Register we pretend the frame pointer is allocated to. Normally, this
338 is %fp, but if we are in a leaf procedure, this is (%sp + offset). We
339 record "offset" separately as it may be too big for (reg + disp). */
340 rtx frame_base_reg;
341 HOST_WIDE_INT frame_base_offset;
342
343 /* Some local-dynamic TLS symbol name. */
344 const char *some_ld_name;
345
346 /* Number of global or FP registers to be saved (as 4-byte quantities). */
347 int n_global_fp_regs;
348
349 /* True if the current function is leaf and uses only leaf regs,
350 so that the SPARC leaf function optimization can be applied.
351 Private version of current_function_uses_only_leaf_regs, see
352 sparc_expand_prologue for the rationale. */
353 int leaf_function_p;
354
355 /* True if the prologue saves local or in registers. */
356 bool save_local_in_regs_p;
357
358 /* True if the data calculated by sparc_expand_prologue are valid. */
359 bool prologue_data_valid_p;
360 };
361
362 #define sparc_frame_size cfun->machine->frame_size
363 #define sparc_apparent_frame_size cfun->machine->apparent_frame_size
364 #define sparc_frame_base_reg cfun->machine->frame_base_reg
365 #define sparc_frame_base_offset cfun->machine->frame_base_offset
366 #define sparc_n_global_fp_regs cfun->machine->n_global_fp_regs
367 #define sparc_leaf_function_p cfun->machine->leaf_function_p
368 #define sparc_save_local_in_regs_p cfun->machine->save_local_in_regs_p
369 #define sparc_prologue_data_valid_p cfun->machine->prologue_data_valid_p
370
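/* Illustrative aside (not part of the original file): the idiom behind
   the accessor macros above.  GCC allocates one machine_function per
   function via the init_machine_status hook and hangs it off cfun, so
   each macro reads per-function state for whatever is currently being
   compiled.  A minimal, hypothetical analogue:  */
#if 0 /* example only, not compiled as part of sparc.c */
struct example_machine_function { long frame_size; };
struct example_function { struct example_machine_function *machine; };

/* Stands in for GCC's global "cfun", which always points at the
   function being compiled.  */
static struct example_function *example_cfun;

#define example_frame_size (example_cfun->machine->frame_size)
#endif
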
371 /* 1 if the next opcode is to be specially indented. */
372 int sparc_indent_opcode = 0;
373
374 static void sparc_option_override (void);
375 static void sparc_init_modes (void);
376 static void scan_record_type (const_tree, int *, int *, int *);
377 static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
378 const_tree, bool, bool, int *, int *);
379
380 static int supersparc_adjust_cost (rtx, rtx, rtx, int);
381 static int hypersparc_adjust_cost (rtx, rtx, rtx, int);
382
383 static void sparc_emit_set_const32 (rtx, rtx);
384 static void sparc_emit_set_const64 (rtx, rtx);
385 static void sparc_output_addr_vec (rtx);
386 static void sparc_output_addr_diff_vec (rtx);
387 static void sparc_output_deferred_case_vectors (void);
388 static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
389 static bool sparc_legitimate_constant_p (enum machine_mode, rtx);
390 static rtx sparc_builtin_saveregs (void);
391 static int epilogue_renumber (rtx *, int);
392 static bool sparc_assemble_integer (rtx, unsigned int, int);
393 static int set_extends (rtx);
394 static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
395 static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
396 #ifdef TARGET_SOLARIS
397 static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
398 tree) ATTRIBUTE_UNUSED;
399 #endif
400 static int sparc_adjust_cost (rtx, rtx, rtx, int);
401 static int sparc_issue_rate (void);
402 static void sparc_sched_init (FILE *, int, int);
403 static int sparc_use_sched_lookahead (void);
404
405 static void emit_soft_tfmode_libcall (const char *, int, rtx *);
406 static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
407 static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
408 static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
409 static void emit_hard_tfmode_operation (enum rtx_code, rtx *);
410
411 static bool sparc_function_ok_for_sibcall (tree, tree);
412 static void sparc_init_libfuncs (void);
413 static void sparc_init_builtins (void);
414 static void sparc_vis_init_builtins (void);
415 static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
416 static tree sparc_fold_builtin (tree, int, tree *, bool);
417 static int sparc_vis_mul8x16 (int, int);
418 static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
419 static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
420 HOST_WIDE_INT, tree);
421 static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
422 HOST_WIDE_INT, const_tree);
423 static struct machine_function * sparc_init_machine_status (void);
424 static bool sparc_cannot_force_const_mem (enum machine_mode, rtx);
425 static rtx sparc_tls_get_addr (void);
426 static rtx sparc_tls_got (void);
427 static const char *get_some_local_dynamic_name (void);
428 static int get_some_local_dynamic_name_1 (rtx *, void *);
429 static int sparc_register_move_cost (enum machine_mode,
430 reg_class_t, reg_class_t);
431 static bool sparc_rtx_costs (rtx, int, int, int, int *, bool);
432 static rtx sparc_function_value (const_tree, const_tree, bool);
433 static rtx sparc_libcall_value (enum machine_mode, const_rtx);
434 static bool sparc_function_value_regno_p (const unsigned int);
435 static rtx sparc_struct_value_rtx (tree, int);
436 static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
437 int *, const_tree, int);
438 static bool sparc_return_in_memory (const_tree, const_tree);
439 static bool sparc_strict_argument_naming (cumulative_args_t);
440 static void sparc_va_start (tree, rtx);
441 static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
442 static bool sparc_vector_mode_supported_p (enum machine_mode);
443 static bool sparc_tls_referenced_p (rtx);
444 static rtx sparc_legitimize_tls_address (rtx);
445 static rtx sparc_legitimize_pic_address (rtx, rtx);
446 static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
447 static rtx sparc_delegitimize_address (rtx);
448 static bool sparc_mode_dependent_address_p (const_rtx);
449 static bool sparc_pass_by_reference (cumulative_args_t,
450 enum machine_mode, const_tree, bool);
451 static void sparc_function_arg_advance (cumulative_args_t,
452 enum machine_mode, const_tree, bool);
453 static rtx sparc_function_arg_1 (cumulative_args_t,
454 enum machine_mode, const_tree, bool, bool);
455 static rtx sparc_function_arg (cumulative_args_t,
456 enum machine_mode, const_tree, bool);
457 static rtx sparc_function_incoming_arg (cumulative_args_t,
458 enum machine_mode, const_tree, bool);
459 static unsigned int sparc_function_arg_boundary (enum machine_mode,
460 const_tree);
461 static int sparc_arg_partial_bytes (cumulative_args_t,
462 enum machine_mode, tree, bool);
463 static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
464 static void sparc_file_end (void);
465 static bool sparc_frame_pointer_required (void);
466 static bool sparc_can_eliminate (const int, const int);
467 static rtx sparc_builtin_setjmp_frame_value (void);
468 static void sparc_conditional_register_usage (void);
469 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
470 static const char *sparc_mangle_type (const_tree);
471 #endif
472 static void sparc_trampoline_init (rtx, tree, rtx);
473 static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
474 static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
475 static bool sparc_print_operand_punct_valid_p (unsigned char);
476 static void sparc_print_operand (FILE *, rtx, int);
477 static void sparc_print_operand_address (FILE *, rtx);
478 \f
479 #ifdef SUBTARGET_ATTRIBUTE_TABLE
480 /* Table of valid machine attributes. */
481 static const struct attribute_spec sparc_attribute_table[] =
482 {
483 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
484 do_diagnostic } */
485 SUBTARGET_ATTRIBUTE_TABLE,
486 { NULL, 0, 0, false, false, false, NULL, false }
487 };
488 #endif
489 \f
490 /* Option handling. */
491
492 /* Parsed value. */
493 enum cmodel sparc_cmodel;
494
495 char sparc_hard_reg_printed[8];
496
497 /* Initialize the GCC target structure. */
498
499 /* The default is to use .half rather than .short for aligned HI objects. */
500 #undef TARGET_ASM_ALIGNED_HI_OP
501 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
502
503 #undef TARGET_ASM_UNALIGNED_HI_OP
504 #define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
505 #undef TARGET_ASM_UNALIGNED_SI_OP
506 #define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
507 #undef TARGET_ASM_UNALIGNED_DI_OP
508 #define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"
509
510 /* The target hook has to handle DI-mode values. */
511 #undef TARGET_ASM_INTEGER
512 #define TARGET_ASM_INTEGER sparc_assemble_integer
513
514 #undef TARGET_ASM_FUNCTION_PROLOGUE
515 #define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
516 #undef TARGET_ASM_FUNCTION_EPILOGUE
517 #define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue
518
519 #undef TARGET_SCHED_ADJUST_COST
520 #define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
521 #undef TARGET_SCHED_ISSUE_RATE
522 #define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
523 #undef TARGET_SCHED_INIT
524 #define TARGET_SCHED_INIT sparc_sched_init
525 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
526 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead
527
528 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
529 #define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall
530
531 #undef TARGET_INIT_LIBFUNCS
532 #define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
533 #undef TARGET_INIT_BUILTINS
534 #define TARGET_INIT_BUILTINS sparc_init_builtins
535
536 #undef TARGET_LEGITIMIZE_ADDRESS
537 #define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
538 #undef TARGET_DELEGITIMIZE_ADDRESS
539 #define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
540 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
541 #define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p
542
543 #undef TARGET_EXPAND_BUILTIN
544 #define TARGET_EXPAND_BUILTIN sparc_expand_builtin
545 #undef TARGET_FOLD_BUILTIN
546 #define TARGET_FOLD_BUILTIN sparc_fold_builtin
547
548 #if TARGET_TLS
549 #undef TARGET_HAVE_TLS
550 #define TARGET_HAVE_TLS true
551 #endif
552
553 #undef TARGET_CANNOT_FORCE_CONST_MEM
554 #define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem
555
556 #undef TARGET_ASM_OUTPUT_MI_THUNK
557 #define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
558 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
559 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk
560
561 #undef TARGET_RTX_COSTS
562 #define TARGET_RTX_COSTS sparc_rtx_costs
563 #undef TARGET_ADDRESS_COST
564 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
565 #undef TARGET_REGISTER_MOVE_COST
566 #define TARGET_REGISTER_MOVE_COST sparc_register_move_cost
567
568 #undef TARGET_PROMOTE_FUNCTION_MODE
569 #define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode
570
571 #undef TARGET_FUNCTION_VALUE
572 #define TARGET_FUNCTION_VALUE sparc_function_value
573 #undef TARGET_LIBCALL_VALUE
574 #define TARGET_LIBCALL_VALUE sparc_libcall_value
575 #undef TARGET_FUNCTION_VALUE_REGNO_P
576 #define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p
577
578 #undef TARGET_STRUCT_VALUE_RTX
579 #define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
580 #undef TARGET_RETURN_IN_MEMORY
581 #define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
582 #undef TARGET_MUST_PASS_IN_STACK
583 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
584 #undef TARGET_PASS_BY_REFERENCE
585 #define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
586 #undef TARGET_ARG_PARTIAL_BYTES
587 #define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
588 #undef TARGET_FUNCTION_ARG_ADVANCE
589 #define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
590 #undef TARGET_FUNCTION_ARG
591 #define TARGET_FUNCTION_ARG sparc_function_arg
592 #undef TARGET_FUNCTION_INCOMING_ARG
593 #define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
594 #undef TARGET_FUNCTION_ARG_BOUNDARY
595 #define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary
596
597 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
598 #define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
599 #undef TARGET_STRICT_ARGUMENT_NAMING
600 #define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming
601
602 #undef TARGET_EXPAND_BUILTIN_VA_START
603 #define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
604 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
605 #define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg
606
607 #undef TARGET_VECTOR_MODE_SUPPORTED_P
608 #define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p
609
610 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
611 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode
612
613 #ifdef SUBTARGET_INSERT_ATTRIBUTES
614 #undef TARGET_INSERT_ATTRIBUTES
615 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
616 #endif
617
618 #ifdef SUBTARGET_ATTRIBUTE_TABLE
619 #undef TARGET_ATTRIBUTE_TABLE
620 #define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
621 #endif
622
623 #undef TARGET_RELAXED_ORDERING
624 #define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING
625
626 #undef TARGET_OPTION_OVERRIDE
627 #define TARGET_OPTION_OVERRIDE sparc_option_override
628
629 #if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
630 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
631 #define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
632 #endif
633
634 #undef TARGET_ASM_FILE_END
635 #define TARGET_ASM_FILE_END sparc_file_end
636
637 #undef TARGET_FRAME_POINTER_REQUIRED
638 #define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required
639
640 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
641 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE sparc_builtin_setjmp_frame_value
642
643 #undef TARGET_CAN_ELIMINATE
644 #define TARGET_CAN_ELIMINATE sparc_can_eliminate
645
646 #undef TARGET_PREFERRED_RELOAD_CLASS
647 #define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class
648
649 #undef TARGET_CONDITIONAL_REGISTER_USAGE
650 #define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage
651
652 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
653 #undef TARGET_MANGLE_TYPE
654 #define TARGET_MANGLE_TYPE sparc_mangle_type
655 #endif
656
657 #undef TARGET_LEGITIMATE_ADDRESS_P
658 #define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p
659
660 #undef TARGET_LEGITIMATE_CONSTANT_P
661 #define TARGET_LEGITIMATE_CONSTANT_P sparc_legitimate_constant_p
662
663 #undef TARGET_TRAMPOLINE_INIT
664 #define TARGET_TRAMPOLINE_INIT sparc_trampoline_init
665
666 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
667 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P sparc_print_operand_punct_valid_p
668 #undef TARGET_PRINT_OPERAND
669 #define TARGET_PRINT_OPERAND sparc_print_operand
670 #undef TARGET_PRINT_OPERAND_ADDRESS
671 #define TARGET_PRINT_OPERAND_ADDRESS sparc_print_operand_address
672
673 struct gcc_target targetm = TARGET_INITIALIZER;
674
675 /* Validate and override various options, and do some machine dependent
676 initialization. */
677
678 static void
679 sparc_option_override (void)
680 {
681 static struct code_model {
682 const char *const name;
683 const enum cmodel value;
684 } const cmodels[] = {
685 { "32", CM_32 },
686 { "medlow", CM_MEDLOW },
687 { "medmid", CM_MEDMID },
688 { "medany", CM_MEDANY },
689 { "embmedany", CM_EMBMEDANY },
690 { NULL, (enum cmodel) 0 }
691 };
692 const struct code_model *cmodel;
693 /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=. */
694 static struct cpu_default {
695 const int cpu;
696 const enum processor_type processor;
697 } const cpu_default[] = {
698 /* There must be one entry here for each TARGET_CPU value. */
699 { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
700 { TARGET_CPU_v8, PROCESSOR_V8 },
701 { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
702 { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
703 { TARGET_CPU_leon, PROCESSOR_LEON },
704 { TARGET_CPU_sparclite, PROCESSOR_F930 },
705 { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
706 { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
707 { TARGET_CPU_v9, PROCESSOR_V9 },
708 { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
709 { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
710 { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
711 { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
712 { -1, PROCESSOR_V7 }
713 };
714 const struct cpu_default *def;
715 /* Table of values for -m{cpu,tune}=. This must match the order of
716 the PROCESSOR_* enumeration. */
717 static struct cpu_table {
718 const int disable;
719 const int enable;
720 } const cpu_table[] = {
721 { MASK_ISA, 0 },
722 { MASK_ISA, 0 },
723 { MASK_ISA, MASK_V8 },
724 /* TI TMS390Z55 supersparc */
725 { MASK_ISA, MASK_V8 },
726 { MASK_ISA, MASK_V8|MASK_FPU },
727 /* LEON */
728 { MASK_ISA, MASK_V8|MASK_FPU },
729 { MASK_ISA, MASK_SPARCLITE },
730 /* The Fujitsu MB86930 is the original sparclite chip, with no FPU. */
731 { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
732 /* The Fujitsu MB86934 is a more recent sparclite chip, with an FPU. */
733 { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
734 { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
735 { MASK_ISA, MASK_SPARCLET },
736 /* TEMIC sparclet */
737 { MASK_ISA, MASK_SPARCLET },
738 { MASK_ISA, MASK_V9 },
739 /* UltraSPARC I, II, IIi */
740 { MASK_ISA,
741 /* Although insns using %y are deprecated, they are a clear win. */
742 MASK_V9|MASK_DEPRECATED_V8_INSNS},
743 /* UltraSPARC III */
744 /* ??? Check if %y issue still holds true. */
745 { MASK_ISA,
746 MASK_V9|MASK_DEPRECATED_V8_INSNS},
747 /* UltraSPARC T1 */
748 { MASK_ISA,
749 MASK_V9|MASK_DEPRECATED_V8_INSNS},
750 /* UltraSPARC T2 */
751 { MASK_ISA, MASK_V9},
752 };
753 const struct cpu_table *cpu;
754 unsigned int i;
755 int fpu;
756
757 #ifdef SUBTARGET_OVERRIDE_OPTIONS
758 SUBTARGET_OVERRIDE_OPTIONS;
759 #endif
760
761 #ifndef SPARC_BI_ARCH
762 /* Check for unsupported architecture size. */
763 if (! TARGET_64BIT != DEFAULT_ARCH32_P)
764 error ("%s is not supported by this configuration",
765 DEFAULT_ARCH32_P ? "-m64" : "-m32");
766 #endif
767
768 /* We force all 64-bit archs to use 128-bit long double. */
769 if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
770 {
771 error ("-mlong-double-64 not allowed with -m64");
772 target_flags |= MASK_LONG_DOUBLE_128;
773 }
774
775 /* Code model selection. */
776 sparc_cmodel = SPARC_DEFAULT_CMODEL;
777
778 #ifdef SPARC_BI_ARCH
779 if (TARGET_ARCH32)
780 sparc_cmodel = CM_32;
781 #endif
782
783 if (sparc_cmodel_string != NULL)
784 {
785 if (TARGET_ARCH64)
786 {
787 for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
788 if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
789 break;
790 if (cmodel->name == NULL)
791 error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
792 else
793 sparc_cmodel = cmodel->value;
794 }
795 else
796 error ("-mcmodel= is not supported on 32 bit systems");
797 }
798
799 /* Check that -fcall-saved-REG wasn't specified for out registers. */
800 for (i = 8; i < 16; i++)
801 if (!call_used_regs [i])
802 {
803 error ("-fcall-saved-REG is not supported for out registers");
804 call_used_regs [i] = 1;
805 }
806
807 fpu = target_flags & MASK_FPU; /* save current -mfpu status */
808
809 /* Set the default CPU. */
810 if (!global_options_set.x_sparc_cpu_and_features)
811 {
812 for (def = &cpu_default[0]; def->cpu != -1; ++def)
813 if (def->cpu == TARGET_CPU_DEFAULT)
814 break;
815 gcc_assert (def->cpu != -1);
816 sparc_cpu_and_features = def->processor;
817 }
818 if (!global_options_set.x_sparc_cpu)
819 sparc_cpu = sparc_cpu_and_features;
820
821 cpu = &cpu_table[(int) sparc_cpu_and_features];
822 target_flags &= ~cpu->disable;
823 target_flags |= cpu->enable;
824
825 /* If -mfpu or -mno-fpu was explicitly used, don't override with
826 the processor default. */
827 if (target_flags_explicit & MASK_FPU)
828 target_flags = (target_flags & ~MASK_FPU) | fpu;
829
830 /* Don't allow -mvis if FPU is disabled. */
831 if (! TARGET_FPU)
832 target_flags &= ~MASK_VIS;
833
834 /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
835 are available.
836 -m64 also implies v9. */
837 if (TARGET_VIS || TARGET_ARCH64)
838 {
839 target_flags |= MASK_V9;
840 target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
841 }
842
843 /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
844 if (TARGET_V9 && TARGET_ARCH32)
845 target_flags |= MASK_DEPRECATED_V8_INSNS;
846
847 /* V8PLUS requires V9 and makes no sense in 64-bit mode. */
848 if (! TARGET_V9 || TARGET_ARCH64)
849 target_flags &= ~MASK_V8PLUS;
850
851 /* Don't use stack biasing in 32 bit mode. */
852 if (TARGET_ARCH32)
853 target_flags &= ~MASK_STACK_BIAS;
854
855 /* Supply a default value for align_functions. */
856 if (align_functions == 0
857 && (sparc_cpu == PROCESSOR_ULTRASPARC
858 || sparc_cpu == PROCESSOR_ULTRASPARC3
859 || sparc_cpu == PROCESSOR_NIAGARA
860 || sparc_cpu == PROCESSOR_NIAGARA2))
861 align_functions = 32;
862
863 /* Validate PCC_STRUCT_RETURN. */
864 if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
865 flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
866
867 /* Only use .uaxword when compiling for a 64-bit target. */
868 if (!TARGET_ARCH64)
869 targetm.asm_out.unaligned_op.di = NULL;
870
871 /* Do various machine dependent initializations. */
872 sparc_init_modes ();
873
874 /* Set up function hooks. */
875 init_machine_status = sparc_init_machine_status;
876
877 switch (sparc_cpu)
878 {
879 case PROCESSOR_V7:
880 case PROCESSOR_CYPRESS:
881 sparc_costs = &cypress_costs;
882 break;
883 case PROCESSOR_V8:
884 case PROCESSOR_SPARCLITE:
885 case PROCESSOR_SUPERSPARC:
886 sparc_costs = &supersparc_costs;
887 break;
888 case PROCESSOR_F930:
889 case PROCESSOR_F934:
890 case PROCESSOR_HYPERSPARC:
891 case PROCESSOR_SPARCLITE86X:
892 sparc_costs = &hypersparc_costs;
893 break;
894 case PROCESSOR_LEON:
895 sparc_costs = &leon_costs;
896 break;
897 case PROCESSOR_SPARCLET:
898 case PROCESSOR_TSC701:
899 sparc_costs = &sparclet_costs;
900 break;
901 case PROCESSOR_V9:
902 case PROCESSOR_ULTRASPARC:
903 sparc_costs = &ultrasparc_costs;
904 break;
905 case PROCESSOR_ULTRASPARC3:
906 sparc_costs = &ultrasparc3_costs;
907 break;
908 case PROCESSOR_NIAGARA:
909 sparc_costs = &niagara_costs;
910 break;
911 case PROCESSOR_NIAGARA2:
912 sparc_costs = &niagara2_costs;
913 break;
914 case PROCESSOR_NATIVE:
915 gcc_unreachable ();
916 }
917
918 #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
919 if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
920 target_flags |= MASK_LONG_DOUBLE_128;
921 #endif
922
923 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
924 ((sparc_cpu == PROCESSOR_ULTRASPARC
925 || sparc_cpu == PROCESSOR_NIAGARA
926 || sparc_cpu == PROCESSOR_NIAGARA2)
927 ? 2
928 : (sparc_cpu == PROCESSOR_ULTRASPARC3
929 ? 8 : 3)),
930 global_options.x_param_values,
931 global_options_set.x_param_values);
932 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
933 ((sparc_cpu == PROCESSOR_ULTRASPARC
934 || sparc_cpu == PROCESSOR_ULTRASPARC3
935 || sparc_cpu == PROCESSOR_NIAGARA
936 || sparc_cpu == PROCESSOR_NIAGARA2)
937 ? 64 : 32),
938 global_options.x_param_values,
939 global_options_set.x_param_values);
940
941 /* Disable save slot sharing for call-clobbered registers by default.
942 The IRA sharing algorithm works on single registers only and this
943 pessimizes for double floating-point registers. */
944 if (!global_options_set.x_flag_ira_share_save_slots)
945 flag_ira_share_save_slots = 0;
946 }
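
/* Illustrative aside (not part of the original file): the table-driven
   default-CPU selection used in sparc_option_override above, reduced to
   a standalone program.  The entries here are hypothetical; the real
   table maps TARGET_CPU_* values to PROCESSOR_* enumerators and ends
   with a { -1, ... } sentinel that the gcc_assert guards against
   reaching.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <assert.h>
#include <stdio.h>

struct cpu_default_example { int cpu; const char *processor; };

static const struct cpu_default_example defaults[] = {
  { 0, "cypress" },
  { 8, "v9" },
  { -1, "unreached" } /* sentinel */
};

static const char *default_processor (int configured_cpu)
{
  const struct cpu_default_example *def;

  for (def = &defaults[0]; def->cpu != -1; ++def)
    if (def->cpu == configured_cpu)
      break;
  assert (def->cpu != -1); /* every valid TARGET_CPU value has an entry */
  return def->processor;
}

int main (void)
{
  printf ("%s\n", default_processor (8)); /* prints "v9" */
  return 0;
}
#endif
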
947 \f
948 /* Miscellaneous utilities. */
949
950 /* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
951 or branch on register contents instructions. */
952
953 int
954 v9_regcmp_p (enum rtx_code code)
955 {
956 return (code == EQ || code == NE || code == GE || code == LT
957 || code == LE || code == GT);
958 }
959
960 /* Nonzero if OP is a floating point constant which can
961 be loaded into an integer register using a single
962 sethi instruction. */
963
964 int
965 fp_sethi_p (rtx op)
966 {
967 if (GET_CODE (op) == CONST_DOUBLE)
968 {
969 REAL_VALUE_TYPE r;
970 long i;
971
972 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
973 REAL_VALUE_TO_TARGET_SINGLE (r, i);
974 return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
975 }
976
977 return 0;
978 }
979
980 /* Nonzero if OP is a floating point constant which can
981 be loaded into an integer register using a single
982 mov instruction. */
983
984 int
985 fp_mov_p (rtx op)
986 {
987 if (GET_CODE (op) == CONST_DOUBLE)
988 {
989 REAL_VALUE_TYPE r;
990 long i;
991
992 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
993 REAL_VALUE_TO_TARGET_SINGLE (r, i);
994 return SPARC_SIMM13_P (i);
995 }
996
997 return 0;
998 }
999
1000 /* Nonzero if OP is a floating point constant which can
1001 be loaded into an integer register using a high/losum
1002 instruction sequence. */
1003
1004 int
1005 fp_high_losum_p (rtx op)
1006 {
1007 /* The constraints calling this should only be in
1008 SFmode move insns, so any constant which cannot
1009 be moved using a single insn will do. */
1010 if (GET_CODE (op) == CONST_DOUBLE)
1011 {
1012 REAL_VALUE_TYPE r;
1013 long i;
1014
1015 REAL_VALUE_FROM_CONST_DOUBLE (r, op);
1016 REAL_VALUE_TO_TARGET_SINGLE (r, i);
1017 return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
1018 }
1019
1020 return 0;
1021 }
1022
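/* Illustrative aside (not part of the original file): the three
   predicates above partition the 32-bit bit image of an SF constant by
   how it can be built in an integer register.  Assuming the usual SPARC
   encodings (simm13 is a signed 13-bit immediate; sethi sets bits 31:10
   and clears the low 10), a standalone classifier looks like this.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <stdio.h>

static int simm13_p (long i) { return i >= -4096 && i <= 4095; }
static int sethi_p (long i)  { return (i & 0x3ff) == 0; }

static const char *classify (long image)
{
  if (simm13_p (image))
    return "single mov (fp_mov_p)";
  if (sethi_p (image))
    return "single sethi (fp_sethi_p)";
  return "sethi + or (fp_high_losum_p)";
}

int main (void)
{
  /* 1.0f has bit image 0x3f800000: low 10 bits clear, so one sethi.  */
  printf ("%s\n", classify (0x3f800000L));
  return 0;
}
#endif
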
1023 /* Return true if the address of LABEL can be loaded by means of the
1024 mov{si,di}_pic_label_ref patterns in PIC mode. */
1025
1026 static bool
1027 can_use_mov_pic_label_ref (rtx label)
1028 {
1029 /* VxWorks does not impose a fixed gap between segments; the run-time
1030 gap can be different from the object-file gap. We therefore can't
1031 assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
1032 are absolutely sure that X is in the same segment as the GOT.
1033 Unfortunately, the flexibility of linker scripts means that we
1034 can't be sure of that in general, so assume that GOT-relative
1035 accesses are never valid on VxWorks. */
1036 if (TARGET_VXWORKS_RTP)
1037 return false;
1038
1039 /* Similarly, if the label is non-local, it might end up being placed
1040 in a different section than the current one; now mov_pic_label_ref
1041 requires the label and the code to be in the same section. */
1042 if (LABEL_REF_NONLOCAL_P (label))
1043 return false;
1044
1045 /* Finally, if we are reordering basic blocks and partition into hot
1046 and cold sections, this might happen for any label. */
1047 if (flag_reorder_blocks_and_partition)
1048 return false;
1049
1050 return true;
1051 }
1052
1053 /* Expand a move instruction. Return true if all work is done. */
1054
1055 bool
1056 sparc_expand_move (enum machine_mode mode, rtx *operands)
1057 {
1058 /* Handle sets of MEM first. */
1059 if (GET_CODE (operands[0]) == MEM)
1060 {
1061 /* 0 is a register (%g0, or a pair of registers) on SPARC, so storing zero needs no scratch. */
1062 if (register_or_zero_operand (operands[1], mode))
1063 return false;
1064
1065 if (!reload_in_progress)
1066 {
1067 operands[0] = validize_mem (operands[0]);
1068 operands[1] = force_reg (mode, operands[1]);
1069 }
1070 }
1071
1072 /* Fixup TLS cases. */
1073 if (TARGET_HAVE_TLS
1074 && CONSTANT_P (operands[1])
1075 && sparc_tls_referenced_p (operands [1]))
1076 {
1077 operands[1] = sparc_legitimize_tls_address (operands[1]);
1078 return false;
1079 }
1080
1081 /* Fixup PIC cases. */
1082 if (flag_pic && CONSTANT_P (operands[1]))
1083 {
1084 if (pic_address_needs_scratch (operands[1]))
1085 operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);
1086
1087 /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases. */
1088 if (GET_CODE (operands[1]) == LABEL_REF
1089 && can_use_mov_pic_label_ref (operands[1]))
1090 {
1091 if (mode == SImode)
1092 {
1093 emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
1094 return true;
1095 }
1096
1097 if (mode == DImode)
1098 {
1099 gcc_assert (TARGET_ARCH64);
1100 emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
1101 return true;
1102 }
1103 }
1104
1105 if (symbolic_operand (operands[1], mode))
1106 {
1107 operands[1]
1108 = sparc_legitimize_pic_address (operands[1],
1109 reload_in_progress
1110 ? operands[0] : NULL_RTX);
1111 return false;
1112 }
1113 }
1114
1115 /* If we are trying to toss an integer constant into FP registers,
1116 or loading an FP or vector constant, force it into memory. */
1117 if (CONSTANT_P (operands[1])
1118 && REG_P (operands[0])
1119 && (SPARC_FP_REG_P (REGNO (operands[0]))
1120 || SCALAR_FLOAT_MODE_P (mode)
1121 || VECTOR_MODE_P (mode)))
1122 {
1123 /* emit_group_store will send such bogosity to us when it is
1124 not storing directly into memory. So fix this up to avoid
1125 crashes in output_constant_pool. */
1126 if (operands [1] == const0_rtx)
1127 operands[1] = CONST0_RTX (mode);
1128
1129 /* We can clear FP registers if TARGET_VIS, and other regs unconditionally. */
1130 if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
1131 && const_zero_operand (operands[1], mode))
1132 return false;
1133
1134 if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
1135 /* We are able to build any SF constant in integer registers
1136 with at most 2 instructions. */
1137 && (mode == SFmode
1138 /* And any DF constant in integer registers. */
1139 || (mode == DFmode
1140 && (reload_completed || reload_in_progress))))
1141 return false;
1142
1143 operands[1] = force_const_mem (mode, operands[1]);
1144 if (!reload_in_progress)
1145 operands[1] = validize_mem (operands[1]);
1146 return false;
1147 }
1148
1149 /* Accept non-constants and valid constants unmodified. */
1150 if (!CONSTANT_P (operands[1])
1151 || GET_CODE (operands[1]) == HIGH
1152 || input_operand (operands[1], mode))
1153 return false;
1154
1155 switch (mode)
1156 {
1157 case QImode:
1158 /* All QImode constants require only one insn, so proceed. */
1159 break;
1160
1161 case HImode:
1162 case SImode:
1163 sparc_emit_set_const32 (operands[0], operands[1]);
1164 return true;
1165
1166 case DImode:
1167 /* input_operand should have filtered out 32-bit mode. */
1168 sparc_emit_set_const64 (operands[0], operands[1]);
1169 return true;
1170
1171 default:
1172 gcc_unreachable ();
1173 }
1174
1175 return false;
1176 }
1177
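/* Illustrative aside (not part of the original file): how a move
   expander in the machine description typically uses the function
   above.  If it returns true the move has already been emitted;
   otherwise expansion continues with the (possibly rewritten)
   operands.  The pattern below is a schematic paraphrase of the
   sparc.md idiom, not a verbatim quote.  */
#if 0 /* example only; .md syntax shown as inert text */
(define_expand "movsi"
  [(set (match_operand:SI 0 "nonimmediate_operand" "")
        (match_operand:SI 1 "general_operand" ""))]
  ""
{
  if (sparc_expand_move (SImode, operands))
    DONE;
})
#endif
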
1178 /* Load OP1, a 32-bit constant, into OP0, a register.
1179 We know it can't be done in one insn when we get
1180 here; the move expander guarantees this. */
1181
1182 static void
1183 sparc_emit_set_const32 (rtx op0, rtx op1)
1184 {
1185 enum machine_mode mode = GET_MODE (op0);
1186 rtx temp;
1187
1188 if (reload_in_progress || reload_completed)
1189 temp = op0;
1190 else
1191 temp = gen_reg_rtx (mode);
1192
1193 if (GET_CODE (op1) == CONST_INT)
1194 {
1195 gcc_assert (!small_int_operand (op1, mode)
1196 && !const_high_operand (op1, mode));
1197
1198 /* Emit them as real moves instead of a HIGH/LO_SUM;
1199 this way CSE can see everything and reuse intermediate
1200 values if it wants. */
1201 emit_insn (gen_rtx_SET (VOIDmode, temp,
1202 GEN_INT (INTVAL (op1)
1203 & ~(HOST_WIDE_INT)0x3ff)));
1204
1205 emit_insn (gen_rtx_SET (VOIDmode,
1206 op0,
1207 gen_rtx_IOR (mode, temp,
1208 GEN_INT (INTVAL (op1) & 0x3ff))));
1209 }
1210 else
1211 {
1212 /* A symbol, emit in the traditional way. */
1213 emit_insn (gen_rtx_SET (VOIDmode, temp,
1214 gen_rtx_HIGH (mode, op1)));
1215 emit_insn (gen_rtx_SET (VOIDmode,
1216 op0, gen_rtx_LO_SUM (mode, temp, op1)));
1217 }
1218 }
1219
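/* Illustrative aside (not part of the original file): the arithmetic
   used by sparc_emit_set_const32 above.  The constant is split into a
   part with the low 10 bits cleared (the sethi payload) and the low
   10-bit remainder (the or payload); recombining recovers the value.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <assert.h>

int main (void)
{
  long val  = 0x12345678L;
  long high = val & ~0x3ffL; /* materialized by the first SET (sethi) */
  long low  = val &  0x3ffL; /* added back by the IOR (or)            */

  assert ((high | low) == val);
  return 0;
}
#endif
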
1220 /* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
1221 If TEMP is nonzero, we are forbidden to use any other scratch
1222 registers. Otherwise, we are allowed to generate them as needed.
1223
1224 Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
1225 or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns). */
1226
1227 void
1228 sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
1229 {
1230 rtx temp1, temp2, temp3, temp4, temp5;
1231 rtx ti_temp = 0;
1232
1233 if (temp && GET_MODE (temp) == TImode)
1234 {
1235 ti_temp = temp;
1236 temp = gen_rtx_REG (DImode, REGNO (temp));
1237 }
1238
1239 /* SPARC-V9 code-model support. */
1240 switch (sparc_cmodel)
1241 {
1242 case CM_MEDLOW:
1243 /* The range spanned by all instructions in the object is less
1244 than 2^31 bytes (2GB) and the distance from any instruction
1245 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1246 than 2^31 bytes (2GB).
1247
1248 The executable must be in the low 4TB of the virtual address
1249 space.
1250
1251 sethi %hi(symbol), %temp1
1252 or %temp1, %lo(symbol), %reg */
1253 if (temp)
1254 temp1 = temp; /* op0 is allowed. */
1255 else
1256 temp1 = gen_reg_rtx (DImode);
1257
1258 emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
1259 emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
1260 break;
1261
1262 case CM_MEDMID:
1263 /* The range spanned by all instructions in the object is less
1264 than 2^31 bytes (2GB) and the distance from any instruction
1265 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1266 than 2^31 bytes (2GB).
1267
1268 The executable must be in the low 16TB of the virtual address
1269 space.
1270
1271 sethi %h44(symbol), %temp1
1272 or %temp1, %m44(symbol), %temp2
1273 sllx %temp2, 12, %temp3
1274 or %temp3, %l44(symbol), %reg */
1275 if (temp)
1276 {
1277 temp1 = op0;
1278 temp2 = op0;
1279 temp3 = temp; /* op0 is allowed. */
1280 }
1281 else
1282 {
1283 temp1 = gen_reg_rtx (DImode);
1284 temp2 = gen_reg_rtx (DImode);
1285 temp3 = gen_reg_rtx (DImode);
1286 }
1287
1288 emit_insn (gen_seth44 (temp1, op1));
1289 emit_insn (gen_setm44 (temp2, temp1, op1));
1290 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1291 gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
1292 emit_insn (gen_setl44 (op0, temp3, op1));
1293 break;
1294
1295 case CM_MEDANY:
1296 /* The range spanned by all instructions in the object is less
1297 than 2^31 bytes (2GB) and the distance from any instruction
1298 to the location of the label _GLOBAL_OFFSET_TABLE_ is less
1299 than 2^31 bytes (2GB).
1300
1301 The executable can be placed anywhere in the virtual address
1302 space.
1303
1304 sethi %hh(symbol), %temp1
1305 sethi %lm(symbol), %temp2
1306 or %temp1, %hm(symbol), %temp3
1307 sllx %temp3, 32, %temp4
1308 or %temp4, %temp2, %temp5
1309 or %temp5, %lo(symbol), %reg */
1310 if (temp)
1311 {
1312 /* It is possible that one of the registers we got for operands[2]
1313 might coincide with that of operands[0] (which is why we made
1314 it TImode). Pick the other one to use as our scratch. */
1315 if (rtx_equal_p (temp, op0))
1316 {
1317 gcc_assert (ti_temp);
1318 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1319 }
1320 temp1 = op0;
1321 temp2 = temp; /* op0 is _not_ allowed, see above. */
1322 temp3 = op0;
1323 temp4 = op0;
1324 temp5 = op0;
1325 }
1326 else
1327 {
1328 temp1 = gen_reg_rtx (DImode);
1329 temp2 = gen_reg_rtx (DImode);
1330 temp3 = gen_reg_rtx (DImode);
1331 temp4 = gen_reg_rtx (DImode);
1332 temp5 = gen_reg_rtx (DImode);
1333 }
1334
1335 emit_insn (gen_sethh (temp1, op1));
1336 emit_insn (gen_setlm (temp2, op1));
1337 emit_insn (gen_sethm (temp3, temp1, op1));
1338 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1339 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1340 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1341 gen_rtx_PLUS (DImode, temp4, temp2)));
1342 emit_insn (gen_setlo (op0, temp5, op1));
1343 break;
1344
1345 case CM_EMBMEDANY:
1346 /* Old old old backwards compatibility cruft here.
1347 Essentially it is MEDLOW with a fixed 64-bit
1348 virtual base added to all data segment addresses.
1349 Text-segment stuff is computed like MEDANY, we can't
1350 reuse the code above because the relocation knobs
1351 look different.
1352
1353 Data segment: sethi %hi(symbol), %temp1
1354 add %temp1, EMBMEDANY_BASE_REG, %temp2
1355 or %temp2, %lo(symbol), %reg */
1356 if (data_segment_operand (op1, GET_MODE (op1)))
1357 {
1358 if (temp)
1359 {
1360 temp1 = temp; /* op0 is allowed. */
1361 temp2 = op0;
1362 }
1363 else
1364 {
1365 temp1 = gen_reg_rtx (DImode);
1366 temp2 = gen_reg_rtx (DImode);
1367 }
1368
1369 emit_insn (gen_embmedany_sethi (temp1, op1));
1370 emit_insn (gen_embmedany_brsum (temp2, temp1));
1371 emit_insn (gen_embmedany_losum (op0, temp2, op1));
1372 }
1373
1374 /* Text segment: sethi %uhi(symbol), %temp1
1375 sethi %hi(symbol), %temp2
1376 or %temp1, %ulo(symbol), %temp3
1377 sllx %temp3, 32, %temp4
1378 or %temp4, %temp2, %temp5
1379 or %temp5, %lo(symbol), %reg */
1380 else
1381 {
1382 if (temp)
1383 {
1384 /* It is possible that one of the registers we got for operands[2]
1385 might coincide with that of operands[0] (which is why we made
1386 it TImode). Pick the other one to use as our scratch. */
1387 if (rtx_equal_p (temp, op0))
1388 {
1389 gcc_assert (ti_temp);
1390 temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
1391 }
1392 temp1 = op0;
1393 temp2 = temp; /* op0 is _not_ allowed, see above. */
1394 temp3 = op0;
1395 temp4 = op0;
1396 temp5 = op0;
1397 }
1398 else
1399 {
1400 temp1 = gen_reg_rtx (DImode);
1401 temp2 = gen_reg_rtx (DImode);
1402 temp3 = gen_reg_rtx (DImode);
1403 temp4 = gen_reg_rtx (DImode);
1404 temp5 = gen_reg_rtx (DImode);
1405 }
1406
1407 emit_insn (gen_embmedany_textuhi (temp1, op1));
1408 emit_insn (gen_embmedany_texthi (temp2, op1));
1409 emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
1410 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1411 gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
1412 emit_insn (gen_rtx_SET (VOIDmode, temp5,
1413 gen_rtx_PLUS (DImode, temp4, temp2)));
1414 emit_insn (gen_embmedany_textlo (op0, temp5, op1));
1415 }
1416 break;
1417
1418 default:
1419 gcc_unreachable ();
1420 }
1421 }
1422
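/* Illustrative aside (not part of the original file): the bit slicing
   behind the CM_MEDANY sequence above, assuming the conventional
   meanings of the relocation operators: %hh = bits 63:42, %hm = bits
   41:32, %lm = bits 31:10 and %lo = bits 9:0 of the address.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <assert.h>

int main (void)
{
  unsigned long long sym = 0x123456789abcdef0ULL; /* sample address */
  unsigned long long hh = sym >> 42;              /* sethi %hh      */
  unsigned long long hm = (sym >> 32) & 0x3ff;    /* or    %hm      */
  unsigned long long lm = (sym >> 10) & 0x3fffff; /* sethi %lm      */
  unsigned long long lo = sym & 0x3ff;            /* or    %lo      */

  /* ((hh << 10) | hm) << 32 rebuilds the high word (the sllx step);
     (lm << 10) | lo rebuilds the low word.  */
  assert (((((hh << 10) | hm) << 32) | (lm << 10) | lo) == sym);
  return 0;
}
#endif
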
1423 #if HOST_BITS_PER_WIDE_INT == 32
1424 static void
1425 sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
1426 {
1427 gcc_unreachable ();
1428 }
1429 #else
1430 /* These avoid problems when cross compiling. If we do not
1431 go through all this hair then the optimizer will see
1432 invalid REG_EQUAL notes or in some cases none at all. */
1433 static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
1434 static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
1435 static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
1436 static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);
1437
1438 /* The optimizer is not to assume anything about exactly
1439 which bits are set for a HIGH; they are unspecified.
1440 Unfortunately this leads to many missed optimizations
1441 during CSE. We mask out the non-HIGH bits so the result
1442 matches a plain movdi, to alleviate this problem. */
1443 static rtx
1444 gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
1445 {
1446 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
1447 }
1448
1449 static rtx
1450 gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
1451 {
1452 return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
1453 }
1454
1455 static rtx
1456 gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
1457 {
1458 return gen_rtx_IOR (DImode, src, GEN_INT (val));
1459 }
1460
1461 static rtx
1462 gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
1463 {
1464 return gen_rtx_XOR (DImode, src, GEN_INT (val));
1465 }
1466
1467 /* Worker routines for 64-bit constant formation on arch64.
1468 One of the key things to do in these emissions is
1469 to create as many temp REGs as possible. This makes it
1470 possible for half-built constants to be reused later
1471 when similar values are required again.
1472 Without doing this, the optimizer cannot see such
1473 opportunities. */
1474
1475 static void sparc_emit_set_const64_quick1 (rtx, rtx,
1476 unsigned HOST_WIDE_INT, int);
1477
1478 static void
1479 sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
1480 unsigned HOST_WIDE_INT low_bits, int is_neg)
1481 {
1482 unsigned HOST_WIDE_INT high_bits;
1483
1484 if (is_neg)
1485 high_bits = (~low_bits) & 0xffffffff;
1486 else
1487 high_bits = low_bits;
1488
1489 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1490 if (!is_neg)
1491 {
1492 emit_insn (gen_rtx_SET (VOIDmode, op0,
1493 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1494 }
1495 else
1496 {
1497 /* If we are XOR'ing with -1, then we should emit a one's complement
1498 instead. This way the combiner will notice logical operations
1499 such as ANDN later on and substitute. */
1500 if ((low_bits & 0x3ff) == 0x3ff)
1501 {
1502 emit_insn (gen_rtx_SET (VOIDmode, op0,
1503 gen_rtx_NOT (DImode, temp)));
1504 }
1505 else
1506 {
1507 emit_insn (gen_rtx_SET (VOIDmode, op0,
1508 gen_safe_XOR64 (temp,
1509 (-(HOST_WIDE_INT)0x400
1510 | (low_bits & 0x3ff)))));
1511 }
1512 }
1513 }
1514
1515 static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1516 unsigned HOST_WIDE_INT, int);
1517
1518 static void
1519 sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1520 unsigned HOST_WIDE_INT high_bits,
1521 unsigned HOST_WIDE_INT low_immediate,
1522 int shift_count)
1523 {
1524 rtx temp2 = op0;
1525
1526 if ((high_bits & 0xfffffc00) != 0)
1527 {
1528 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1529 if ((high_bits & ~0xfffffc00) != 0)
1530 emit_insn (gen_rtx_SET (VOIDmode, op0,
1531 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1532 else
1533 temp2 = temp;
1534 }
1535 else
1536 {
1537 emit_insn (gen_safe_SET64 (temp, high_bits));
1538 temp2 = temp;
1539 }
1540
1541 /* Now shift it up into place. */
1542 emit_insn (gen_rtx_SET (VOIDmode, op0,
1543 gen_rtx_ASHIFT (DImode, temp2,
1544 GEN_INT (shift_count))));
1545
1546 /* If there is a low immediate part piece, finish up by
1547 putting that in as well. */
1548 if (low_immediate != 0)
1549 emit_insn (gen_rtx_SET (VOIDmode, op0,
1550 gen_safe_OR64 (op0, low_immediate)));
1551 }
1552
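/* Illustrative aside (not part of the original file): the shape of the
   quick2 strategy above, modeled on a plain 64-bit integer: build the
   significant high bits (sethi, plus an or if their low 10 bits are
   nonzero), shift them into position, then OR in any low immediate.  */
#if 0 /* example only, not compiled as part of sparc.c */
#include <assert.h>

int main (void)
{
  unsigned long long high_bits = 0x12345678ULL;
  unsigned long long low_immediate = 0x1ffULL;
  int shift_count = 32;
  unsigned long long reg;

  reg = high_bits & ~0x3ffULL; /* sethi-style HIGH */
  reg |= high_bits & 0x3ffULL; /* or in the low 10 bits of the high part */
  reg <<= shift_count;         /* sllx into position */
  reg |= low_immediate;        /* final or, when nonzero */

  assert (reg == ((high_bits << 32) | low_immediate));
  return 0;
}
#endif
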
1553 static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1554 unsigned HOST_WIDE_INT);
1555
1556 /* Full 64-bit constant decomposition. Even though this is the
1557 'worst' case, we still optimize a few things away. */
1558 static void
1559 sparc_emit_set_const64_longway (rtx op0, rtx temp,
1560 unsigned HOST_WIDE_INT high_bits,
1561 unsigned HOST_WIDE_INT low_bits)
1562 {
1563 rtx sub_temp;
1564
1565 if (reload_in_progress || reload_completed)
1566 sub_temp = op0;
1567 else
1568 sub_temp = gen_reg_rtx (DImode);
1569
1570 if ((high_bits & 0xfffffc00) != 0)
1571 {
1572 emit_insn (gen_safe_HIGH64 (temp, high_bits));
1573 if ((high_bits & ~0xfffffc00) != 0)
1574 emit_insn (gen_rtx_SET (VOIDmode,
1575 sub_temp,
1576 gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1577 else
1578 sub_temp = temp;
1579 }
1580 else
1581 {
1582 emit_insn (gen_safe_SET64 (temp, high_bits));
1583 sub_temp = temp;
1584 }
1585
1586 if (!reload_in_progress && !reload_completed)
1587 {
1588 rtx temp2 = gen_reg_rtx (DImode);
1589 rtx temp3 = gen_reg_rtx (DImode);
1590 rtx temp4 = gen_reg_rtx (DImode);
1591
1592 emit_insn (gen_rtx_SET (VOIDmode, temp4,
1593 gen_rtx_ASHIFT (DImode, sub_temp,
1594 GEN_INT (32))));
1595
1596 emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1597 if ((low_bits & ~0xfffffc00) != 0)
1598 {
1599 emit_insn (gen_rtx_SET (VOIDmode, temp3,
1600 gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1601 emit_insn (gen_rtx_SET (VOIDmode, op0,
1602 gen_rtx_PLUS (DImode, temp4, temp3)));
1603 }
1604 else
1605 {
1606 emit_insn (gen_rtx_SET (VOIDmode, op0,
1607 gen_rtx_PLUS (DImode, temp4, temp2)));
1608 }
1609 }
1610 else
1611 {
1612 rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
1613 rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
1614 rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1615 int to_shift = 12;
1616
1617 /* We are in the middle of reload, so this is really
1618 painful. However we do still make an attempt to
1619 avoid emitting truly stupid code. */
1620 if (low1 != const0_rtx)
1621 {
1622 emit_insn (gen_rtx_SET (VOIDmode, op0,
1623 gen_rtx_ASHIFT (DImode, sub_temp,
1624 GEN_INT (to_shift))));
1625 emit_insn (gen_rtx_SET (VOIDmode, op0,
1626 gen_rtx_IOR (DImode, op0, low1)));
1627 sub_temp = op0;
1628 to_shift = 12;
1629 }
1630 else
1631 {
1632 to_shift += 12;
1633 }
1634 if (low2 != const0_rtx)
1635 {
1636 emit_insn (gen_rtx_SET (VOIDmode, op0,
1637 gen_rtx_ASHIFT (DImode, sub_temp,
1638 GEN_INT (to_shift))));
1639 emit_insn (gen_rtx_SET (VOIDmode, op0,
1640 gen_rtx_IOR (DImode, op0, low2)));
1641 sub_temp = op0;
1642 to_shift = 8;
1643 }
1644 else
1645 {
1646 to_shift += 8;
1647 }
1648 emit_insn (gen_rtx_SET (VOIDmode, op0,
1649 gen_rtx_ASHIFT (DImode, sub_temp,
1650 GEN_INT (to_shift))));
1651 if (low3 != const0_rtx)
1652 emit_insn (gen_rtx_SET (VOIDmode, op0,
1653 gen_rtx_IOR (DImode, op0, low3)));
1654 /* phew... */
1655 }
1656 }
1657
1658 /* Analyze a 64-bit constant for certain properties. */
1659 static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1660 unsigned HOST_WIDE_INT,
1661 int *, int *, int *);
1662
1663 static void
1664 analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1665 unsigned HOST_WIDE_INT low_bits,
1666 int *hbsp, int *lbsp, int *abbasp)
1667 {
1668 int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1669 int i;
1670
1671 lowest_bit_set = highest_bit_set = -1;
1672 i = 0;
1673 do
1674 {
1675 if ((lowest_bit_set == -1)
1676 && ((low_bits >> i) & 1))
1677 lowest_bit_set = i;
1678 if ((highest_bit_set == -1)
1679 && ((high_bits >> (32 - i - 1)) & 1))
1680 highest_bit_set = (64 - i - 1);
1681 }
1682 while (++i < 32
1683 && ((highest_bit_set == -1)
1684 || (lowest_bit_set == -1)));
1685 if (i == 32)
1686 {
1687 i = 0;
1688 do
1689 {
1690 if ((lowest_bit_set == -1)
1691 && ((high_bits >> i) & 1))
1692 lowest_bit_set = i + 32;
1693 if ((highest_bit_set == -1)
1694 && ((low_bits >> (32 - i - 1)) & 1))
1695 highest_bit_set = 32 - i - 1;
1696 }
1697 while (++i < 32
1698 && ((highest_bit_set == -1)
1699 || (lowest_bit_set == -1)));
1700 }
1701 /* If there are no bits set, this should have gone out
1702 as one instruction! */
1703 gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1704 all_bits_between_are_set = 1;
1705 for (i = lowest_bit_set; i <= highest_bit_set; i++)
1706 {
1707 if (i < 32)
1708 {
1709 if ((low_bits & (1 << i)) != 0)
1710 continue;
1711 }
1712 else
1713 {
1714 if ((high_bits & (1 << (i - 32))) != 0)
1715 continue;
1716 }
1717 all_bits_between_are_set = 0;
1718 break;
1719 }
1720 *hbsp = highest_bit_set;
1721 *lbsp = lowest_bit_set;
1722 *abbasp = all_bits_between_are_set;
1723 }
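
/* Editor's worked example (not part of the original source): for the
   constant 0x0000000000ff0000, this function is entered with
   high_bits == 0 and low_bits == 0x00ff0000 and reports
   lowest_bit_set == 16, highest_bit_set == 23 and
   all_bits_between_are_set == 1, since bits 16..23 form one
   contiguous run of ones.  */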
1724
1725 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1726
1727 static int
1728 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1729 unsigned HOST_WIDE_INT low_bits)
1730 {
1731 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1732
1733 if (high_bits == 0
1734 || high_bits == 0xffffffff)
1735 return 1;
1736
1737 analyze_64bit_constant (high_bits, low_bits,
1738 &highest_bit_set, &lowest_bit_set,
1739 &all_bits_between_are_set);
1740
1741 if ((highest_bit_set == 63
1742 || lowest_bit_set == 0)
1743 && all_bits_between_are_set != 0)
1744 return 1;
1745
1746 if ((highest_bit_set - lowest_bit_set) < 21)
1747 return 1;
1748
1749 return 0;
1750 }
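
/* Editor's examples: 0x00000000deadbeef has high_bits == 0 and is
   trivially 2 insns (sethi %hi + or %lo of the low word), while
   0x0000001ffff00000 qualifies via the span test: highest_bit_set == 36
   and lowest_bit_set == 20, a difference of 16 < 21, so a sethi of the
   focused bits plus one shift suffices.  */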
1751
1752 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1753 unsigned HOST_WIDE_INT,
1754 int, int);
1755
1756 static unsigned HOST_WIDE_INT
1757 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1758 unsigned HOST_WIDE_INT low_bits,
1759 int lowest_bit_set, int shift)
1760 {
1761 HOST_WIDE_INT hi, lo;
1762
1763 if (lowest_bit_set < 32)
1764 {
1765 lo = (low_bits >> lowest_bit_set) << shift;
1766 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1767 }
1768 else
1769 {
1770 lo = 0;
1771 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1772 }
1773 gcc_assert (! (hi & lo));
1774 return (hi | lo);
1775 }
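
/* Editor's worked example: with high_bits == 0x0000001f,
   low_bits == 0xfff00000, lowest_bit_set == 20 and shift == 10,
   lo == (0xfff00000 >> 20) << 10 == 0x3ffc00 and
   hi == (0x1f << 12) << 10 == 0x7c00000, giving 0x7fffc00, i.e. the
   17-bit run relocated so that its lowest set bit sits at position 10.  */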
1776
1777 /* Here we are sure to be arch64 and this is an integer constant
1778 being loaded into a register. Emit the most efficient
1779 insn sequence possible. Detection of all the 1-insn cases
1780 has been done already. */
1781 static void
1782 sparc_emit_set_const64 (rtx op0, rtx op1)
1783 {
1784 unsigned HOST_WIDE_INT high_bits, low_bits;
1785 int lowest_bit_set, highest_bit_set;
1786 int all_bits_between_are_set;
1787 rtx temp = 0;
1788
1789 /* Sanity check that we know what we are working with. */
1790 gcc_assert (TARGET_ARCH64
1791 && (GET_CODE (op0) == SUBREG
1792 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1793
1794 if (reload_in_progress || reload_completed)
1795 temp = op0;
1796
1797 if (GET_CODE (op1) != CONST_INT)
1798 {
1799 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1800 return;
1801 }
1802
1803 if (! temp)
1804 temp = gen_reg_rtx (DImode);
1805
1806 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1807 low_bits = (INTVAL (op1) & 0xffffffff);
1808
1809 /* low_bits bits 0 --> 31
1810 high_bits bits 32 --> 63 */
1811
1812 analyze_64bit_constant (high_bits, low_bits,
1813 &highest_bit_set, &lowest_bit_set,
1814 &all_bits_between_are_set);
1815
1816 /* First try for a 2-insn sequence. */
1817
1818 /* These situations are preferred because the optimizer can
1819 * do more things with them:
1820 * 1) mov -1, %reg
1821 * sllx %reg, shift, %reg
1822 * 2) mov -1, %reg
1823 * srlx %reg, shift, %reg
1824 * 3) mov some_small_const, %reg
1825 * sllx %reg, shift, %reg
1826 */
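/* Editor's illustration of the cases above: 0xfff0000000000000
 * (highest_bit_set == 63, all intervening bits set) becomes
 * mov -1, %reg; sllx %reg, 52, %reg (case 1), and the 9-bit run
 * 0x00000000001ff000 becomes mov 0x1ff, %reg; sllx %reg, 12, %reg
 * (case 3).
 */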
1827 if (((highest_bit_set == 63
1828 || lowest_bit_set == 0)
1829 && all_bits_between_are_set != 0)
1830 || ((highest_bit_set - lowest_bit_set) < 12))
1831 {
1832 HOST_WIDE_INT the_const = -1;
1833 int shift = lowest_bit_set;
1834
1835 if ((highest_bit_set != 63
1836 && lowest_bit_set != 0)
1837 || all_bits_between_are_set == 0)
1838 {
1839 the_const =
1840 create_simple_focus_bits (high_bits, low_bits,
1841 lowest_bit_set, 0);
1842 }
1843 else if (lowest_bit_set == 0)
1844 shift = -(63 - highest_bit_set);
1845
1846 gcc_assert (SPARC_SIMM13_P (the_const));
1847 gcc_assert (shift != 0);
1848
1849 emit_insn (gen_safe_SET64 (temp, the_const));
1850 if (shift > 0)
1851 emit_insn (gen_rtx_SET (VOIDmode,
1852 op0,
1853 gen_rtx_ASHIFT (DImode,
1854 temp,
1855 GEN_INT (shift))));
1856 else if (shift < 0)
1857 emit_insn (gen_rtx_SET (VOIDmode,
1858 op0,
1859 gen_rtx_LSHIFTRT (DImode,
1860 temp,
1861 GEN_INT (-shift))));
1862 return;
1863 }
1864
1865 /* Now a range of 22 or fewer bits set somewhere.
1866 * 1) sethi %hi(focus_bits), %reg
1867 * sllx %reg, shift, %reg
1868 * 2) sethi %hi(focus_bits), %reg
1869 * srlx %reg, shift, %reg
1870 */
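/* Editor's example: for 0x0000001ffff00000 the focus bits are 0x7fffc00,
 * so we emit
 * sethi %hi(0x7fffc00), %reg
 * sllx %reg, 10, %reg
 * since lowest_bit_set == 20 is 10 above the position where sethi
 * leaves the bits.
 */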
1871 if ((highest_bit_set - lowest_bit_set) < 21)
1872 {
1873 unsigned HOST_WIDE_INT focus_bits =
1874 create_simple_focus_bits (high_bits, low_bits,
1875 lowest_bit_set, 10);
1876
1877 gcc_assert (SPARC_SETHI_P (focus_bits));
1878 gcc_assert (lowest_bit_set != 10);
1879
1880 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1881
1882 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1883 if (lowest_bit_set < 10)
1884 emit_insn (gen_rtx_SET (VOIDmode,
1885 op0,
1886 gen_rtx_LSHIFTRT (DImode, temp,
1887 GEN_INT (10 - lowest_bit_set))));
1888 else if (lowest_bit_set > 10)
1889 emit_insn (gen_rtx_SET (VOIDmode,
1890 op0,
1891 gen_rtx_ASHIFT (DImode, temp,
1892 GEN_INT (lowest_bit_set - 10))));
1893 return;
1894 }
1895
1896 /* 1) sethi %hi(low_bits), %reg
1897 * or %reg, %lo(low_bits), %reg
1898 * 2) sethi %hi(~low_bits), %reg
1899 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1900 */
1901 if (high_bits == 0
1902 || high_bits == 0xffffffff)
1903 {
1904 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1905 (high_bits == 0xffffffff));
1906 return;
1907 }
1908
1909 /* Now, try 3-insn sequences. */
1910
1911 /* 1) sethi %hi(high_bits), %reg
1912 * or %reg, %lo(high_bits), %reg
1913 * sllx %reg, 32, %reg
1914 */
1915 if (low_bits == 0)
1916 {
1917 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1918 return;
1919 }
1920
1921 /* We may be able to do something quick
1922 when the constant is negated, so try that. */
1923 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1924 (~low_bits) & 0xfffffc00))
1925 {
1926 /* NOTE: The trailing bits get XOR'd so we need the
1927 non-negated bits, not the negated ones. */
1928 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1929
1930 if ((((~high_bits) & 0xffffffff) == 0
1931 && ((~low_bits) & 0x80000000) == 0)
1932 || (((~high_bits) & 0xffffffff) == 0xffffffff
1933 && ((~low_bits) & 0x80000000) != 0))
1934 {
1935 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1936
1937 if ((SPARC_SETHI_P (fast_int)
1938 && (~high_bits & 0xffffffff) == 0)
1939 || SPARC_SIMM13_P (fast_int))
1940 emit_insn (gen_safe_SET64 (temp, fast_int));
1941 else
1942 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1943 }
1944 else
1945 {
1946 rtx negated_const;
1947 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1948 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1949 sparc_emit_set_const64 (temp, negated_const);
1950 }
1951
1952 /* If we are XOR'ing with -1, then we should emit a one's complement
1953 instead. This way the combiner will notice logical operations
1954 such as ANDN later on and substitute. */
1955 if (trailing_bits == 0x3ff)
1956 {
1957 emit_insn (gen_rtx_SET (VOIDmode, op0,
1958 gen_rtx_NOT (DImode, temp)));
1959 }
1960 else
1961 {
1962 emit_insn (gen_rtx_SET (VOIDmode,
1963 op0,
1964 gen_safe_XOR64 (temp,
1965 (-0x400 | trailing_bits))));
1966 }
1967 return;
1968 }
1969
1970 /* 1) sethi %hi(xxx), %reg
1971 * or %reg, %lo(xxx), %reg
1972 * sllx %reg, yyy, %reg
1973 *
1974 * ??? This is just a generalized version of the low_bits==0
1975 * thing above, FIXME...
1976 */
1977 if ((highest_bit_set - lowest_bit_set) < 32)
1978 {
1979 unsigned HOST_WIDE_INT focus_bits =
1980 create_simple_focus_bits (high_bits, low_bits,
1981 lowest_bit_set, 0);
1982
1983 /* We can't get here in this state. */
1984 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1985
1986 /* So what we know is that the set bits straddle the
1987 middle of the 64-bit word. */
1988 sparc_emit_set_const64_quick2 (op0, temp,
1989 focus_bits, 0,
1990 lowest_bit_set);
1991 return;
1992 }
1993
1994 /* 1) sethi %hi(high_bits), %reg
1995 * or %reg, %lo(high_bits), %reg
1996 * sllx %reg, 32, %reg
1997 * or %reg, low_bits, %reg
1998 */
1999 if (SPARC_SIMM13_P (low_bits)
2000 && ((int) low_bits > 0))
2001 {
2002 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
2003 return;
2004 }
2005
2006 /* The easiest way when all else fails, is full decomposition. */
2007 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
2008 }
2009 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
2010
2011 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
2012 return the mode to be used for the comparison. For floating-point,
2013 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
2014 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
2015 processing is needed. */
2016
2017 enum machine_mode
2018 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
2019 {
2020 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2021 {
2022 switch (op)
2023 {
2024 case EQ:
2025 case NE:
2026 case UNORDERED:
2027 case ORDERED:
2028 case UNLT:
2029 case UNLE:
2030 case UNGT:
2031 case UNGE:
2032 case UNEQ:
2033 case LTGT:
2034 return CCFPmode;
2035
2036 case LT:
2037 case LE:
2038 case GT:
2039 case GE:
2040 return CCFPEmode;
2041
2042 default:
2043 gcc_unreachable ();
2044 }
2045 }
2046 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2047 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2048 {
2049 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2050 return CCX_NOOVmode;
2051 else
2052 return CC_NOOVmode;
2053 }
2054 else
2055 {
2056 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2057 return CCXmode;
2058 else
2059 return CCmode;
2060 }
2061 }
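
/* Editor's illustration: on TARGET_ARCH64, comparing the DImode result of
   a PLUS against zero yields CCX_NOOVmode, which records that the insn
   setting the condition codes does not produce a usable overflow bit; an
   ordinary SImode register comparison yields plain CCmode, and float
   comparisons pick CCFPEmode for the signaling LT/LE/GT/GE tests and
   CCFPmode for the quiet ones.  */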
2062
2063 /* Emit the compare insn and return the CC reg for a CODE comparison
2064 with operands X and Y. */
2065
2066 static rtx
2067 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2068 {
2069 enum machine_mode mode;
2070 rtx cc_reg;
2071
2072 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2073 return x;
2074
2075 mode = SELECT_CC_MODE (code, x, y);
2076
2077 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2078 fcc regs (cse can't tell they're really call clobbered regs and will
2079 remove a duplicate comparison even if there is an intervening function
2080 call - it will then try to reload the cc reg via an int reg which is why
2081 we need the movcc patterns). It is possible to provide the movcc
2082 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2083 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2084 to tell cse that CCFPE mode registers (even pseudos) are call
2085 clobbered. */
2086
2087 /* ??? This is an experiment. Rather than making changes to cse which may
2088 or may not be easy/clean, we do our own cse. This is possible because
2089 we will generate hard registers. Cse knows they're call clobbered (it
2090 doesn't know the same thing about pseudos). If we guess wrong, no big
2091 deal, but if we win, great! */
2092
2093 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2094 #if 1 /* experiment */
2095 {
2096 int reg;
2097 /* We cycle through the registers to ensure they're all exercised. */
2098 static int next_fcc_reg = 0;
2099 /* Previous x,y for each fcc reg. */
2100 static rtx prev_args[4][2];
2101
2102 /* Scan prev_args for x,y. */
2103 for (reg = 0; reg < 4; reg++)
2104 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2105 break;
2106 if (reg == 4)
2107 {
2108 reg = next_fcc_reg;
2109 prev_args[reg][0] = x;
2110 prev_args[reg][1] = y;
2111 next_fcc_reg = (next_fcc_reg + 1) & 3;
2112 }
2113 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2114 }
2115 #else
2116 cc_reg = gen_reg_rtx (mode);
2117 #endif /* ! experiment */
2118 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2119 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2120 else
2121 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2122
2123 /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD. If we do, this
2124 will only result in an unrecognizable insn, so there is no point in asserting. */
2125 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2126
2127 return cc_reg;
2128 }
2129
2130
2131 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2132
2133 rtx
2134 gen_compare_reg (rtx cmp)
2135 {
2136 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2137 }
2138
2139 /* This function is used for v9 only.
2140 DEST is the target of the Scc insn.
2141 CODE is the code for an Scc's comparison.
2142 X and Y are the values we compare.
2143
2144 This function is needed to turn
2145
2146 (set (reg:SI 110)
2147 (gt (reg:CCX 100 %icc)
2148 (const_int 0)))
2149 into
2150 (set (reg:SI 110)
2151 (gt:DI (reg:CCX 100 %icc)
2152 (const_int 0)))
2153
2154 I.e., the instruction recognizer needs to see the mode of the comparison to
2155 find the right instruction. We could use "gt:DI" right in the
2156 define_expand, but leaving it out allows us to handle DI, SI, etc. */
2157
2158 static int
2159 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2160 {
2161 if (! TARGET_ARCH64
2162 && (GET_MODE (x) == DImode
2163 || GET_MODE (dest) == DImode))
2164 return 0;
2165
2166 /* Try to use the movrCC insns. */
2167 if (TARGET_ARCH64
2168 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2169 && y == const0_rtx
2170 && v9_regcmp_p (compare_code))
2171 {
2172 rtx op0 = x;
2173 rtx temp;
2174
2175 /* Special case for op0 != 0. This can be done with one instruction if
2176 dest == x. */
2177
2178 if (compare_code == NE
2179 && GET_MODE (dest) == DImode
2180 && rtx_equal_p (op0, dest))
2181 {
2182 emit_insn (gen_rtx_SET (VOIDmode, dest,
2183 gen_rtx_IF_THEN_ELSE (DImode,
2184 gen_rtx_fmt_ee (compare_code, DImode,
2185 op0, const0_rtx),
2186 const1_rtx,
2187 dest)));
2188 return 1;
2189 }
2190
2191 if (reg_overlap_mentioned_p (dest, op0))
2192 {
2193 /* Handle the case where dest == x.
2194 We "early clobber" the result. */
2195 op0 = gen_reg_rtx (GET_MODE (x));
2196 emit_move_insn (op0, x);
2197 }
2198
2199 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2200 if (GET_MODE (op0) != DImode)
2201 {
2202 temp = gen_reg_rtx (DImode);
2203 convert_move (temp, op0, 0);
2204 }
2205 else
2206 temp = op0;
2207 emit_insn (gen_rtx_SET (VOIDmode, dest,
2208 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2209 gen_rtx_fmt_ee (compare_code, DImode,
2210 temp, const0_rtx),
2211 const1_rtx,
2212 dest)));
2213 return 1;
2214 }
2215 else
2216 {
2217 x = gen_compare_reg_1 (compare_code, x, y);
2218 y = const0_rtx;
2219
2220 gcc_assert (GET_MODE (x) != CC_NOOVmode
2221 && GET_MODE (x) != CCX_NOOVmode);
2222
2223 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2224 emit_insn (gen_rtx_SET (VOIDmode, dest,
2225 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2226 gen_rtx_fmt_ee (compare_code,
2227 GET_MODE (x), x, y),
2228 const1_rtx, dest)));
2229 return 1;
2230 }
2231 }
2232
2233
2234 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2235 without jumps using the addx/subx instructions. */
2236
2237 bool
2238 emit_scc_insn (rtx operands[])
2239 {
2240 rtx tem;
2241 rtx x;
2242 rtx y;
2243 enum rtx_code code;
2244
2245 /* The quad-word fp compare library routines all return nonzero to indicate
2246 true, which is different from the equivalent libgcc routines, so we must
2247 handle them specially here. */
2248 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2249 {
2250 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2251 GET_CODE (operands[1]));
2252 operands[2] = XEXP (operands[1], 0);
2253 operands[3] = XEXP (operands[1], 1);
2254 }
2255
2256 code = GET_CODE (operands[1]);
2257 x = operands[2];
2258 y = operands[3];
2259
2260 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2261 more applications). The exception to this is "reg != 0" which can
2262 be done in one instruction on v9 (so we do it). */
2263 if (code == EQ)
2264 {
2265 if (GET_MODE (x) == SImode)
2266 {
2267 rtx pat = gen_seqsi_special (operands[0], x, y);
2268 emit_insn (pat);
2269 return true;
2270 }
2271 else if (GET_MODE (x) == DImode)
2272 {
2273 rtx pat = gen_seqdi_special (operands[0], x, y);
2274 emit_insn (pat);
2275 return true;
2276 }
2277 }
2278
2279 if (code == NE)
2280 {
2281 if (GET_MODE (x) == SImode)
2282 {
2283 rtx pat = gen_snesi_special (operands[0], x, y);
2284 emit_insn (pat);
2285 return true;
2286 }
2287 else if (GET_MODE (x) == DImode)
2288 {
2289 rtx pat = gen_snedi_special (operands[0], x, y);
2290 emit_insn (pat);
2291 return true;
2292 }
2293 }
2294
2295 /* For the rest, on v9 we can use conditional moves. */
2296
2297 if (TARGET_V9)
2298 {
2299 if (gen_v9_scc (operands[0], code, x, y))
2300 return true;
2301 }
2302
2303 /* We can do LTU and GEU using the addx/subx instructions too. And
2304 for GTU/LEU, if both operands are registers, swap them and fall
2305 back to the easy case. */
2306 if (code == GTU || code == LEU)
2307 {
2308 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2309 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2310 {
2311 tem = x;
2312 x = y;
2313 y = tem;
2314 code = swap_condition (code);
2315 }
2316 }
2317
2318 if (code == LTU || code == GEU)
2319 {
2320 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2321 gen_rtx_fmt_ee (code, SImode,
2322 gen_compare_reg_1 (code, x, y),
2323 const0_rtx)));
2324 return true;
2325 }
2326
2327 /* Nope, do branches. */
2328 return false;
2329 }
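
/* Editor's sketch of the branch-free LTU case: the RTL emitted above
   amounts to
        subcc   %o0, %o1, %g0   ! compare, setting the carry bit
        addx    %g0, 0, %o2     ! %o2 = 0 + 0 + C, i.e. (%o0 < %o1) unsigned
   which is why LTU/GEU (and GTU/LEU after swapping the operands) need
   no jump.  */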
2330
2331 /* Emit a conditional jump insn for the v9 architecture using comparison code
2332 CODE and jump target LABEL.
2333 This function exists to take advantage of the v9 brxx insns. */
2334
2335 static void
2336 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2337 {
2338 emit_jump_insn (gen_rtx_SET (VOIDmode,
2339 pc_rtx,
2340 gen_rtx_IF_THEN_ELSE (VOIDmode,
2341 gen_rtx_fmt_ee (code, GET_MODE (op0),
2342 op0, const0_rtx),
2343 gen_rtx_LABEL_REF (VOIDmode, label),
2344 pc_rtx)));
2345 }
2346
2347 void
2348 emit_conditional_branch_insn (rtx operands[])
2349 {
2350 /* The quad-word fp compare library routines all return nonzero to indicate
2351 true, which is different from the equivalent libgcc routines, so we must
2352 handle them specially here. */
2353 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2354 {
2355 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2356 GET_CODE (operands[0]));
2357 operands[1] = XEXP (operands[0], 0);
2358 operands[2] = XEXP (operands[0], 1);
2359 }
2360
2361 if (TARGET_ARCH64 && operands[2] == const0_rtx
2362 && GET_CODE (operands[1]) == REG
2363 && GET_MODE (operands[1]) == DImode)
2364 {
2365 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2366 return;
2367 }
2368
2369 operands[1] = gen_compare_reg (operands[0]);
2370 operands[2] = const0_rtx;
2371 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2372 operands[1], operands[2]);
2373 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2374 operands[3]));
2375 }
2376
2377
2378 /* Generate a DFmode part of a hard TFmode register.
2379 REG is the TFmode hard register, LOW is 1 for the
2380 low 64 bits of the register and 0 otherwise.
2381 */
2382 rtx
2383 gen_df_reg (rtx reg, int low)
2384 {
2385 int regno = REGNO (reg);
2386
2387 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2388 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2389 return gen_rtx_REG (DFmode, regno);
2390 }
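
/* Editor's worked example: with WORDS_BIG_ENDIAN set (the usual SPARC
   configuration), a TFmode value in %f0 occupies %f0-%f3, so
   gen_df_reg (reg, 0) returns the DFmode register %f0 (the high half)
   and gen_df_reg (reg, 1) returns %f2, since each %f register in this
   range is 32 bits wide and a DFmode value therefore spans two regnos.  */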
2391 \f
2392 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2393 Unlike normal calls, TFmode operands are passed by reference. It is
2394 assumed that no more than 3 operands are required. */
2395
2396 static void
2397 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2398 {
2399 rtx ret_slot = NULL, arg[3], func_sym;
2400 int i;
2401
2402 /* We only expect to be called for conversions, unary, and binary ops. */
2403 gcc_assert (nargs == 2 || nargs == 3);
2404
2405 for (i = 0; i < nargs; ++i)
2406 {
2407 rtx this_arg = operands[i];
2408 rtx this_slot;
2409
2410 /* TFmode arguments and return values are passed by reference. */
2411 if (GET_MODE (this_arg) == TFmode)
2412 {
2413 int force_stack_temp;
2414
2415 force_stack_temp = 0;
2416 if (TARGET_BUGGY_QP_LIB && i == 0)
2417 force_stack_temp = 1;
2418
2419 if (GET_CODE (this_arg) == MEM
2420 && ! force_stack_temp)
2421 this_arg = XEXP (this_arg, 0);
2422 else if (CONSTANT_P (this_arg)
2423 && ! force_stack_temp)
2424 {
2425 this_slot = force_const_mem (TFmode, this_arg);
2426 this_arg = XEXP (this_slot, 0);
2427 }
2428 else
2429 {
2430 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2431
2432 /* Operand 0 is the return value. We'll copy it out later. */
2433 if (i > 0)
2434 emit_move_insn (this_slot, this_arg);
2435 else
2436 ret_slot = this_slot;
2437
2438 this_arg = XEXP (this_slot, 0);
2439 }
2440 }
2441
2442 arg[i] = this_arg;
2443 }
2444
2445 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2446
2447 if (GET_MODE (operands[0]) == TFmode)
2448 {
2449 if (nargs == 2)
2450 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2451 arg[0], GET_MODE (arg[0]),
2452 arg[1], GET_MODE (arg[1]));
2453 else
2454 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2455 arg[0], GET_MODE (arg[0]),
2456 arg[1], GET_MODE (arg[1]),
2457 arg[2], GET_MODE (arg[2]));
2458
2459 if (ret_slot)
2460 emit_move_insn (operands[0], ret_slot);
2461 }
2462 else
2463 {
2464 rtx ret;
2465
2466 gcc_assert (nargs == 2);
2467
2468 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2469 GET_MODE (operands[0]), 1,
2470 arg[1], GET_MODE (arg[1]));
2471
2472 if (ret != operands[0])
2473 emit_move_insn (operands[0], ret);
2474 }
2475 }
2476
2477 /* Expand soft-float TFmode calls to the SPARC ABI routines. */
2478
2479 static void
2480 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2481 {
2482 const char *func;
2483
2484 switch (code)
2485 {
2486 case PLUS:
2487 func = "_Qp_add";
2488 break;
2489 case MINUS:
2490 func = "_Qp_sub";
2491 break;
2492 case MULT:
2493 func = "_Qp_mul";
2494 break;
2495 case DIV:
2496 func = "_Qp_div";
2497 break;
2498 default:
2499 gcc_unreachable ();
2500 }
2501
2502 emit_soft_tfmode_libcall (func, 3, operands);
2503 }
2504
2505 static void
2506 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2507 {
2508 const char *func;
2509
2510 gcc_assert (code == SQRT);
2511 func = "_Qp_sqrt";
2512
2513 emit_soft_tfmode_libcall (func, 2, operands);
2514 }
2515
2516 static void
2517 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2518 {
2519 const char *func;
2520
2521 switch (code)
2522 {
2523 case FLOAT_EXTEND:
2524 switch (GET_MODE (operands[1]))
2525 {
2526 case SFmode:
2527 func = "_Qp_stoq";
2528 break;
2529 case DFmode:
2530 func = "_Qp_dtoq";
2531 break;
2532 default:
2533 gcc_unreachable ();
2534 }
2535 break;
2536
2537 case FLOAT_TRUNCATE:
2538 switch (GET_MODE (operands[0]))
2539 {
2540 case SFmode:
2541 func = "_Qp_qtos";
2542 break;
2543 case DFmode:
2544 func = "_Qp_qtod";
2545 break;
2546 default:
2547 gcc_unreachable ();
2548 }
2549 break;
2550
2551 case FLOAT:
2552 switch (GET_MODE (operands[1]))
2553 {
2554 case SImode:
2555 func = "_Qp_itoq";
2556 if (TARGET_ARCH64)
2557 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2558 break;
2559 case DImode:
2560 func = "_Qp_xtoq";
2561 break;
2562 default:
2563 gcc_unreachable ();
2564 }
2565 break;
2566
2567 case UNSIGNED_FLOAT:
2568 switch (GET_MODE (operands[1]))
2569 {
2570 case SImode:
2571 func = "_Qp_uitoq";
2572 if (TARGET_ARCH64)
2573 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2574 break;
2575 case DImode:
2576 func = "_Qp_uxtoq";
2577 break;
2578 default:
2579 gcc_unreachable ();
2580 }
2581 break;
2582
2583 case FIX:
2584 switch (GET_MODE (operands[0]))
2585 {
2586 case SImode:
2587 func = "_Qp_qtoi";
2588 break;
2589 case DImode:
2590 func = "_Qp_qtox";
2591 break;
2592 default:
2593 gcc_unreachable ();
2594 }
2595 break;
2596
2597 case UNSIGNED_FIX:
2598 switch (GET_MODE (operands[0]))
2599 {
2600 case SImode:
2601 func = "_Qp_qtoui";
2602 break;
2603 case DImode:
2604 func = "_Qp_qtoux";
2605 break;
2606 default:
2607 gcc_unreachable ();
2608 }
2609 break;
2610
2611 default:
2612 gcc_unreachable ();
2613 }
2614
2615 emit_soft_tfmode_libcall (func, 2, operands);
2616 }
2617
2618 /* Expand a hard-float tfmode operation. All arguments must be in
2619 registers. */
2620
2621 static void
2622 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2623 {
2624 rtx op, dest;
2625
2626 if (GET_RTX_CLASS (code) == RTX_UNARY)
2627 {
2628 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2629 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2630 }
2631 else
2632 {
2633 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2634 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2635 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2636 operands[1], operands[2]);
2637 }
2638
2639 if (register_operand (operands[0], VOIDmode))
2640 dest = operands[0];
2641 else
2642 dest = gen_reg_rtx (GET_MODE (operands[0]));
2643
2644 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2645
2646 if (dest != operands[0])
2647 emit_move_insn (operands[0], dest);
2648 }
2649
2650 void
2651 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2652 {
2653 if (TARGET_HARD_QUAD)
2654 emit_hard_tfmode_operation (code, operands);
2655 else
2656 emit_soft_tfmode_binop (code, operands);
2657 }
2658
2659 void
2660 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2661 {
2662 if (TARGET_HARD_QUAD)
2663 emit_hard_tfmode_operation (code, operands);
2664 else
2665 emit_soft_tfmode_unop (code, operands);
2666 }
2667
2668 void
2669 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2670 {
2671 if (TARGET_HARD_QUAD)
2672 emit_hard_tfmode_operation (code, operands);
2673 else
2674 emit_soft_tfmode_cvt (code, operands);
2675 }
2676 \f
2677 /* Return nonzero if a branch/jump/call instruction will be emitting
2678 a nop into its delay slot. */
2679
2680 int
2681 empty_delay_slot (rtx insn)
2682 {
2683 rtx seq;
2684
2685 /* If there is no previous instruction (should not happen), return true. */
2686 if (PREV_INSN (insn) == NULL)
2687 return 1;
2688
2689 seq = NEXT_INSN (PREV_INSN (insn));
2690 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2691 return 0;
2692
2693 return 1;
2694 }
2695
2696 /* Return nonzero if TRIAL can go into the call delay slot. */
2697
2698 int
2699 tls_call_delay (rtx trial)
2700 {
2701 rtx pat;
2702
2703 /* Binutils allows
2704 call __tls_get_addr, %tgd_call (foo)
2705 add %l7, %o0, %o0, %tgd_add (foo)
2706 while Sun as/ld does not. */
2707 if (TARGET_GNU_TLS || !TARGET_TLS)
2708 return 1;
2709
2710 pat = PATTERN (trial);
2711
2712 /* We must reject tgd_add{32|64}, i.e.
2713 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2714 and tldm_add{32|64}, i.e.
2715 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2716 for Sun as/ld. */
2717 if (GET_CODE (pat) == SET
2718 && GET_CODE (SET_SRC (pat)) == PLUS)
2719 {
2720 rtx unspec = XEXP (SET_SRC (pat), 1);
2721
2722 if (GET_CODE (unspec) == UNSPEC
2723 && (XINT (unspec, 1) == UNSPEC_TLSGD
2724 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2725 return 0;
2726 }
2727
2728 return 1;
2729 }
2730
2731 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2732 instruction. RETURN_P is true if the v9 variant 'return' is to be
2733 considered in the test too.
2734
2735 TRIAL must be a SET whose destination is a REG appropriate for the
2736 'restore' instruction or, if RETURN_P is true, for the 'return'
2737 instruction. */
2738
2739 static int
2740 eligible_for_restore_insn (rtx trial, bool return_p)
2741 {
2742 rtx pat = PATTERN (trial);
2743 rtx src = SET_SRC (pat);
2744
2745 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2746 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2747 && arith_operand (src, GET_MODE (src)))
2748 {
2749 if (TARGET_ARCH64)
2750 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2751 else
2752 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2753 }
2754
2755 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2756 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2757 && arith_double_operand (src, GET_MODE (src)))
2758 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2759
2760 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2761 else if (! TARGET_FPU && register_operand (src, SFmode))
2762 return 1;
2763
2764 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2765 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2766 return 1;
2767
2768 /* If we have the 'return' instruction, anything that does not use
2769 local or output registers and can go into a delay slot wins. */
2770 else if (return_p
2771 && TARGET_V9
2772 && !epilogue_renumber (&pat, 1)
2773 && get_attr_in_uncond_branch_delay (trial)
2774 == IN_UNCOND_BRANCH_DELAY_TRUE)
2775 return 1;
2776
2777 /* The 'restore src1,src2,dest' pattern for SImode. */
2778 else if (GET_CODE (src) == PLUS
2779 && register_operand (XEXP (src, 0), SImode)
2780 && arith_operand (XEXP (src, 1), SImode))
2781 return 1;
2782
2783 /* The 'restore src1,src2,dest' pattern for DImode. */
2784 else if (GET_CODE (src) == PLUS
2785 && register_operand (XEXP (src, 0), DImode)
2786 && arith_double_operand (XEXP (src, 1), DImode))
2787 return 1;
2788
2789 /* The 'restore src1,%lo(src2),dest' pattern. */
2790 else if (GET_CODE (src) == LO_SUM
2791 && ! TARGET_CM_MEDMID
2792 && ((register_operand (XEXP (src, 0), SImode)
2793 && immediate_operand (XEXP (src, 1), SImode))
2794 || (TARGET_ARCH64
2795 && register_operand (XEXP (src, 0), DImode)
2796 && immediate_operand (XEXP (src, 1), DImode))))
2797 return 1;
2798
2799 /* The 'restore src,src,dest' pattern. */
2800 else if (GET_CODE (src) == ASHIFT
2801 && (register_operand (XEXP (src, 0), SImode)
2802 || register_operand (XEXP (src, 0), DImode))
2803 && XEXP (src, 1) == const1_rtx)
2804 return 1;
2805
2806 return 0;
2807 }
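
/* Editor's example of a combinable epilogue: for a non-leaf function
   ending in "return x + 1" with x live in %i0, the final addition
   matches the SImode PLUS pattern above, so the delay-slot scheduler
   can emit
        ret
        restore %i0, 1, %o0
   folding the addition into the register-window restore.  */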
2808
2809 /* Return nonzero if TRIAL can go into the function return's delay slot. */
2810
2811 int
2812 eligible_for_return_delay (rtx trial)
2813 {
2814 rtx pat;
2815
2816 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2817 return 0;
2818
2819 if (get_attr_length (trial) != 1)
2820 return 0;
2821
2822 /* If the function uses __builtin_eh_return, the eh_return machinery
2823 occupies the delay slot. */
2824 if (crtl->calls_eh_return)
2825 return 0;
2826
2827 /* In the case of a leaf or flat function, anything can go into the slot. */
2828 if (sparc_leaf_function_p || TARGET_FLAT)
2829 return
2830 get_attr_in_uncond_branch_delay (trial) == IN_UNCOND_BRANCH_DELAY_TRUE;
2831
2832 pat = PATTERN (trial);
2833
2834 /* Otherwise, only operations which can be done in tandem with
2835 a `restore' or `return' insn can go into the delay slot. */
2836 if (GET_CODE (SET_DEST (pat)) != REG
2837 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2838 return 0;
2839
2840 /* If this instruction sets up a floating-point register and we have a return
2841 instruction, it can probably go in. But a restore will not work
2842 with FP_REGS. */
2843 if (REGNO (SET_DEST (pat)) >= 32)
2844 return (TARGET_V9
2845 && !epilogue_renumber (&pat, 1)
2846 && get_attr_in_uncond_branch_delay (trial)
2847 == IN_UNCOND_BRANCH_DELAY_TRUE);
2848
2849 return eligible_for_restore_insn (trial, true);
2850 }
2851
2852 /* Return nonzero if TRIAL can go into the sibling call's delay slot. */
2853
2854 int
2855 eligible_for_sibcall_delay (rtx trial)
2856 {
2857 rtx pat;
2858
2859 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2860 return 0;
2861
2862 if (get_attr_length (trial) != 1)
2863 return 0;
2864
2865 pat = PATTERN (trial);
2866
2867 if (sparc_leaf_function_p || TARGET_FLAT)
2868 {
2869 /* If the tail call is done using the call instruction,
2870 we have to restore %o7 in the delay slot. */
2871 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2872 return 0;
2873
2874 /* %g1 is used to build the function address. */
2875 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2876 return 0;
2877
2878 return 1;
2879 }
2880
2881 /* Otherwise, only operations which can be done in tandem with
2882 a `restore' insn can go into the delay slot. */
2883 if (GET_CODE (SET_DEST (pat)) != REG
2884 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2885 || REGNO (SET_DEST (pat)) >= 32)
2886 return 0;
2887
2888 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2889 in most cases. */
2890 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2891 return 0;
2892
2893 return eligible_for_restore_insn (trial, false);
2894 }
2895
2896 int
2897 short_branch (int uid1, int uid2)
2898 {
2899 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2900
2901 /* Leave a few words of "slop". */
2902 if (delta >= -1023 && delta <= 1022)
2903 return 1;
2904
2905 return 0;
2906 }
2907
2908 /* Return nonzero if REG is not used after INSN.
2909 We assume REG is a reload reg, and therefore does
2910 not live past labels or calls or jumps. */
2911 int
2912 reg_unused_after (rtx reg, rtx insn)
2913 {
2914 enum rtx_code code, prev_code = UNKNOWN;
2915
2916 while ((insn = NEXT_INSN (insn)))
2917 {
2918 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2919 return 1;
2920
2921 code = GET_CODE (insn);
2922 if (GET_CODE (insn) == CODE_LABEL)
2923 return 1;
2924
2925 if (INSN_P (insn))
2926 {
2927 rtx set = single_set (insn);
2928 int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2929 if (set && in_src)
2930 return 0;
2931 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2932 return 1;
2933 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2934 return 0;
2935 }
2936 prev_code = code;
2937 }
2938 return 1;
2939 }
2940 \f
2941 /* Determine if it's legal to put X into the constant pool. This
2942 is not possible if X contains the address of a symbol that is
2943 not constant (TLS) or not known at final link time (PIC). */
2944
2945 static bool
2946 sparc_cannot_force_const_mem (enum machine_mode mode, rtx x)
2947 {
2948 switch (GET_CODE (x))
2949 {
2950 case CONST_INT:
2951 case CONST_DOUBLE:
2952 case CONST_VECTOR:
2953 /* Accept all non-symbolic constants. */
2954 return false;
2955
2956 case LABEL_REF:
2957 /* Labels are OK iff we are non-PIC. */
2958 return flag_pic != 0;
2959
2960 case SYMBOL_REF:
2961 /* 'Naked' TLS symbol references are never OK,
2962 non-TLS symbols are OK iff we are non-PIC. */
2963 if (SYMBOL_REF_TLS_MODEL (x))
2964 return true;
2965 else
2966 return flag_pic != 0;
2967
2968 case CONST:
2969 return sparc_cannot_force_const_mem (mode, XEXP (x, 0));
2970 case PLUS:
2971 case MINUS:
2972 return sparc_cannot_force_const_mem (mode, XEXP (x, 0))
2973 || sparc_cannot_force_const_mem (mode, XEXP (x, 1));
2974 case UNSPEC:
2975 return true;
2976 default:
2977 gcc_unreachable ();
2978 }
2979 }
2980 \f
2981 /* Global Offset Table support. */
2982 static GTY(()) rtx got_helper_rtx = NULL_RTX;
2983 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
2984
2985 /* Return the SYMBOL_REF for the Global Offset Table. */
2986
2987 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
2988
2989 static rtx
2990 sparc_got (void)
2991 {
2992 if (!sparc_got_symbol)
2993 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2994
2995 return sparc_got_symbol;
2996 }
2997
2998 /* Ensure that we are not using patterns that are not OK with PIC. */
2999
3000 int
3001 check_pic (int i)
3002 {
3003 rtx op;
3004
3005 switch (flag_pic)
3006 {
3007 case 1:
3008 op = recog_data.operand[i];
3009 gcc_assert (GET_CODE (op) != SYMBOL_REF
3010 && (GET_CODE (op) != CONST
3011 || (GET_CODE (XEXP (op, 0)) == MINUS
3012 && XEXP (XEXP (op, 0), 0) == sparc_got ()
3013 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
3014 case 2:
3015 default:
3016 return 1;
3017 }
3018 }
3019
3020 /* Return true if X is an address which needs a temporary register when
3021 reloaded while generating PIC code. */
3022
3023 int
3024 pic_address_needs_scratch (rtx x)
3025 {
3026 /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg. */
3027 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3028 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3029 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3030 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3031 return 1;
3032
3033 return 0;
3034 }
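
/* Editor's example: (const (plus (symbol_ref "x") (const_int 8))) fits
   in a simm13 and needs no scratch register, whereas
   (const (plus (symbol_ref "x") (const_int 0x2000))) does, because
   0x2000 lies outside the signed 13-bit range [-4096, 4095].  */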
3035
3036 /* Determine if a given RTX is a valid constant. We already know this
3037 satisfies CONSTANT_P. */
3038
3039 static bool
3040 sparc_legitimate_constant_p (enum machine_mode mode, rtx x)
3041 {
3042 switch (GET_CODE (x))
3043 {
3044 case CONST:
3045 case SYMBOL_REF:
3046 if (sparc_tls_referenced_p (x))
3047 return false;
3048 break;
3049
3050 case CONST_DOUBLE:
3051 if (GET_MODE (x) == VOIDmode)
3052 return true;
3053
3054 /* Floating point constants are generally not ok.
3055 The only exception is 0.0 in VIS. */
3056 if (TARGET_VIS
3057 && SCALAR_FLOAT_MODE_P (mode)
3058 && const_zero_operand (x, mode))
3059 return true;
3060
3061 return false;
3062
3063 case CONST_VECTOR:
3064 /* Vector constants are generally not ok.
3065 The only exception is 0 in VIS. */
3066 if (TARGET_VIS
3067 && const_zero_operand (x, mode))
3068 return true;
3069
3070 return false;
3071
3072 default:
3073 break;
3074 }
3075
3076 return true;
3077 }
3078
3079 /* Determine if a given RTX is a valid constant address. */
3080
3081 bool
3082 constant_address_p (rtx x)
3083 {
3084 switch (GET_CODE (x))
3085 {
3086 case LABEL_REF:
3087 case CONST_INT:
3088 case HIGH:
3089 return true;
3090
3091 case CONST:
3092 if (flag_pic && pic_address_needs_scratch (x))
3093 return false;
3094 return sparc_legitimate_constant_p (Pmode, x);
3095
3096 case SYMBOL_REF:
3097 return !flag_pic && sparc_legitimate_constant_p (Pmode, x);
3098
3099 default:
3100 return false;
3101 }
3102 }
3103
3104 /* Nonzero if the constant value X is a legitimate general operand
3105 when generating PIC code. It is given that flag_pic is on and
3106 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3107
3108 bool
3109 legitimate_pic_operand_p (rtx x)
3110 {
3111 if (pic_address_needs_scratch (x))
3112 return false;
3113 if (sparc_tls_referenced_p (x))
3114 return false;
3115 return true;
3116 }
3117
3118 #define RTX_OK_FOR_OFFSET_P(X, MODE) \
3119 (CONST_INT_P (X) \
3120 && INTVAL (X) >= -0x1000 \
3121 && INTVAL (X) < (0x1000 - GET_MODE_SIZE (MODE)))
3122
3123 #define RTX_OK_FOR_OLO10_P(X, MODE) \
3124 (CONST_INT_P (X) \
3125 && INTVAL (X) >= -0x1000 \
3126 && INTVAL (X) < (0xc00 - GET_MODE_SIZE (MODE)))
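
/* Editor's note: both macros implement a signed 13-bit displacement
   check, shrunk at the top so that the last byte of the access is still
   addressable; e.g. for DImode (8 bytes) RTX_OK_FOR_OFFSET_P accepts
   offsets in [-4096, 4087]. RTX_OK_FOR_OLO10_P stops below 0xc00
   because the %lo() value (at most 0x3ff) plus the offset must still
   fit in the 13-bit immediate field.  */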
3127
3128 /* Handle the TARGET_LEGITIMATE_ADDRESS_P target hook.
3129
3130 On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
3131 ordinarily. This changes a bit when generating PIC. */
3132
3133 static bool
3134 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3135 {
3136 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3137
3138 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3139 rs1 = addr;
3140 else if (GET_CODE (addr) == PLUS)
3141 {
3142 rs1 = XEXP (addr, 0);
3143 rs2 = XEXP (addr, 1);
3144
3145 /* Canonicalize. REG comes first; if there are no regs,
3146 LO_SUM comes first. */
3147 if (!REG_P (rs1)
3148 && GET_CODE (rs1) != SUBREG
3149 && (REG_P (rs2)
3150 || GET_CODE (rs2) == SUBREG
3151 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3152 {
3153 rs1 = XEXP (addr, 1);
3154 rs2 = XEXP (addr, 0);
3155 }
3156
3157 if ((flag_pic == 1
3158 && rs1 == pic_offset_table_rtx
3159 && !REG_P (rs2)
3160 && GET_CODE (rs2) != SUBREG
3161 && GET_CODE (rs2) != LO_SUM
3162 && GET_CODE (rs2) != MEM
3163 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3164 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3165 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3166 || ((REG_P (rs1)
3167 || GET_CODE (rs1) == SUBREG)
3168 && RTX_OK_FOR_OFFSET_P (rs2, mode)))
3169 {
3170 imm1 = rs2;
3171 rs2 = NULL;
3172 }
3173 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3174 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3175 {
3176 /* We prohibit REG + REG for TFmode when there are no quad move insns
3177 and we consequently need to split. We do this because REG+REG
3178 is not an offsettable address. If we get the situation in reload
3179 where source and destination of a movtf pattern are both MEMs with
3180 REG+REG address, then only one of them gets converted to an
3181 offsettable address. */
3182 if (mode == TFmode
3183 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3184 return 0;
3185
3186 /* On ARCH32, we prohibit REG + REG for DFmode/DImode when not
3187 optimizing, because mem_min_alignment is then likely to be zero
3188 after reload and the forced split would lack a matching splitter
3189 pattern. */
3190 if (TARGET_ARCH32 && !optimize
3191 && (mode == DFmode || mode == DImode))
3192 return 0;
3193 }
3194 else if (USE_AS_OFFSETABLE_LO10
3195 && GET_CODE (rs1) == LO_SUM
3196 && TARGET_ARCH64
3197 && ! TARGET_CM_MEDMID
3198 && RTX_OK_FOR_OLO10_P (rs2, mode))
3199 {
3200 rs2 = NULL;
3201 imm1 = XEXP (rs1, 1);
3202 rs1 = XEXP (rs1, 0);
3203 if (!CONSTANT_P (imm1)
3204 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3205 return 0;
3206 }
3207 }
3208 else if (GET_CODE (addr) == LO_SUM)
3209 {
3210 rs1 = XEXP (addr, 0);
3211 imm1 = XEXP (addr, 1);
3212
3213 if (!CONSTANT_P (imm1)
3214 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3215 return 0;
3216
3217 /* We can't allow TFmode in 32-bit mode, because an offset greater
3218 than the alignment (8) may cause the LO_SUM to overflow. */
3219 if (mode == TFmode && TARGET_ARCH32)
3220 return 0;
3221 }
3222 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3223 return 1;
3224 else
3225 return 0;
3226
3227 if (GET_CODE (rs1) == SUBREG)
3228 rs1 = SUBREG_REG (rs1);
3229 if (!REG_P (rs1))
3230 return 0;
3231
3232 if (rs2)
3233 {
3234 if (GET_CODE (rs2) == SUBREG)
3235 rs2 = SUBREG_REG (rs2);
3236 if (!REG_P (rs2))
3237 return 0;
3238 }
3239
3240 if (strict)
3241 {
3242 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3243 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3244 return 0;
3245 }
3246 else
3247 {
3248 if ((REGNO (rs1) >= 32
3249 && REGNO (rs1) != FRAME_POINTER_REGNUM
3250 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3251 || (rs2
3252 && (REGNO (rs2) >= 32
3253 && REGNO (rs2) != FRAME_POINTER_REGNUM
3254 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3255 return 0;
3256 }
3257 return 1;
3258 }
3259
3260 /* Return the SYMBOL_REF for the tls_get_addr function. */
3261
3262 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3263
3264 static rtx
3265 sparc_tls_get_addr (void)
3266 {
3267 if (!sparc_tls_symbol)
3268 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3269
3270 return sparc_tls_symbol;
3271 }
3272
3273 /* Return the Global Offset Table to be used in TLS mode. */
3274
3275 static rtx
3276 sparc_tls_got (void)
3277 {
3278 /* In PIC mode, this is just the PIC offset table. */
3279 if (flag_pic)
3280 {
3281 crtl->uses_pic_offset_table = 1;
3282 return pic_offset_table_rtx;
3283 }
3284
3285 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3286 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3287 if (TARGET_SUN_TLS && TARGET_ARCH32)
3288 {
3289 load_got_register ();
3290 return global_offset_table_rtx;
3291 }
3292
3293 /* In all other cases, we load a new pseudo with the GOT symbol. */
3294 return copy_to_reg (sparc_got ());
3295 }
3296
3297 /* Return true if X contains a thread-local symbol. */
3298
3299 static bool
3300 sparc_tls_referenced_p (rtx x)
3301 {
3302 if (!TARGET_HAVE_TLS)
3303 return false;
3304
3305 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3306 x = XEXP (XEXP (x, 0), 0);
3307
3308 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3309 return true;
3310
3311 /* That's all we handle in sparc_legitimize_tls_address for now. */
3312 return false;
3313 }
3314
3315 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3316 this (thread-local) address. */
3317
3318 static rtx
3319 sparc_legitimize_tls_address (rtx addr)
3320 {
3321 rtx temp1, temp2, temp3, ret, o0, got, insn;
3322
3323 gcc_assert (can_create_pseudo_p ());
3324
3325 if (GET_CODE (addr) == SYMBOL_REF)
3326 switch (SYMBOL_REF_TLS_MODEL (addr))
3327 {
3328 case TLS_MODEL_GLOBAL_DYNAMIC:
3329 start_sequence ();
3330 temp1 = gen_reg_rtx (SImode);
3331 temp2 = gen_reg_rtx (SImode);
3332 ret = gen_reg_rtx (Pmode);
3333 o0 = gen_rtx_REG (Pmode, 8);
3334 got = sparc_tls_got ();
3335 emit_insn (gen_tgd_hi22 (temp1, addr));
3336 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3337 if (TARGET_ARCH32)
3338 {
3339 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3340 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3341 addr, const1_rtx));
3342 }
3343 else
3344 {
3345 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3346 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3347 addr, const1_rtx));
3348 }
3349 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3350 insn = get_insns ();
3351 end_sequence ();
3352 emit_libcall_block (insn, ret, o0, addr);
3353 break;
3354
3355 case TLS_MODEL_LOCAL_DYNAMIC:
3356 start_sequence ();
3357 temp1 = gen_reg_rtx (SImode);
3358 temp2 = gen_reg_rtx (SImode);
3359 temp3 = gen_reg_rtx (Pmode);
3360 ret = gen_reg_rtx (Pmode);
3361 o0 = gen_rtx_REG (Pmode, 8);
3362 got = sparc_tls_got ();
3363 emit_insn (gen_tldm_hi22 (temp1));
3364 emit_insn (gen_tldm_lo10 (temp2, temp1));
3365 if (TARGET_ARCH32)
3366 {
3367 emit_insn (gen_tldm_add32 (o0, got, temp2));
3368 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3369 const1_rtx));
3370 }
3371 else
3372 {
3373 emit_insn (gen_tldm_add64 (o0, got, temp2));
3374 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3375 const1_rtx));
3376 }
3377 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3378 insn = get_insns ();
3379 end_sequence ();
3380 emit_libcall_block (insn, temp3, o0,
3381 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3382 UNSPEC_TLSLD_BASE));
3383 temp1 = gen_reg_rtx (SImode);
3384 temp2 = gen_reg_rtx (SImode);
3385 emit_insn (gen_tldo_hix22 (temp1, addr));
3386 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3387 if (TARGET_ARCH32)
3388 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3389 else
3390 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3391 break;
3392
3393 case TLS_MODEL_INITIAL_EXEC:
3394 temp1 = gen_reg_rtx (SImode);
3395 temp2 = gen_reg_rtx (SImode);
3396 temp3 = gen_reg_rtx (Pmode);
3397 got = sparc_tls_got ();
3398 emit_insn (gen_tie_hi22 (temp1, addr));
3399 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3400 if (TARGET_ARCH32)
3401 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3402 else
3403 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3404 if (TARGET_SUN_TLS)
3405 {
3406 ret = gen_reg_rtx (Pmode);
3407 if (TARGET_ARCH32)
3408 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3409 temp3, addr));
3410 else
3411 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3412 temp3, addr));
3413 }
3414 else
3415 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3416 break;
3417
3418 case TLS_MODEL_LOCAL_EXEC:
3419 temp1 = gen_reg_rtx (Pmode);
3420 temp2 = gen_reg_rtx (Pmode);
3421 if (TARGET_ARCH32)
3422 {
3423 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3424 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3425 }
3426 else
3427 {
3428 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3429 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3430 }
3431 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3432 break;
3433
3434 default:
3435 gcc_unreachable ();
3436 }
3437
3438 else if (GET_CODE (addr) == CONST)
3439 {
3440 rtx base, offset;
3441
3442 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3443
3444 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3445 offset = XEXP (XEXP (addr, 0), 1);
3446
3447 base = force_operand (base, NULL_RTX);
3448 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3449 offset = force_reg (Pmode, offset);
3450 ret = gen_rtx_PLUS (Pmode, base, offset);
3451 }
3452
3453 else
3454 gcc_unreachable (); /* for now ... */
3455
3456 return ret;
3457 }
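
/* Editor's sketch of the assembly produced for the local-exec model
   above, with %g7 as the thread pointer:
        sethi   %tle_hix22(sym), %tmp
        xor     %tmp, %tle_lox10(sym), %tmp
        add     %g7, %tmp, %result
   The dynamic models instead end in a call to __tls_get_addr.  */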
3458
3459 /* Legitimize PIC addresses. If the address is already position-independent,
3460 we return ORIG. Newly generated position-independent addresses go into a
3461 reg. This is REG if nonzero, otherwise we allocate register(s) as
3462 necessary. */
3463
3464 static rtx
3465 sparc_legitimize_pic_address (rtx orig, rtx reg)
3466 {
3467 bool gotdata_op = false;
3468
3469 if (GET_CODE (orig) == SYMBOL_REF
3470 /* See the comment in sparc_expand_move. */
3471 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3472 {
3473 rtx pic_ref, address;
3474 rtx insn;
3475
3476 if (reg == 0)
3477 {
3478 gcc_assert (! reload_in_progress && ! reload_completed);
3479 reg = gen_reg_rtx (Pmode);
3480 }
3481
3482 if (flag_pic == 2)
3483 {
3484 /* If not during reload, allocate another temp reg here for loading
3485 in the address, so that these instructions can be optimized
3486 properly. */
3487 rtx temp_reg = ((reload_in_progress || reload_completed)
3488 ? reg : gen_reg_rtx (Pmode));
3489
3490 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3491 won't get confused into thinking that these two instructions
3492 are loading in the true address of the symbol. If in the
3493 future a PIC rtx exists, that should be used instead. */
3494 if (TARGET_ARCH64)
3495 {
3496 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3497 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3498 }
3499 else
3500 {
3501 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3502 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3503 }
3504 address = temp_reg;
3505 gotdata_op = true;
3506 }
3507 else
3508 address = orig;
3509
3510 crtl->uses_pic_offset_table = 1;
3511 if (gotdata_op)
3512 {
3513 if (TARGET_ARCH64)
3514 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3515 pic_offset_table_rtx,
3516 address, orig));
3517 else
3518 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3519 pic_offset_table_rtx,
3520 address, orig));
3521 }
3522 else
3523 {
3524 pic_ref
3525 = gen_const_mem (Pmode,
3526 gen_rtx_PLUS (Pmode,
3527 pic_offset_table_rtx, address));
3528 insn = emit_move_insn (reg, pic_ref);
3529 }
3530
3531 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3532 by the loop optimizer. */
3533 set_unique_reg_note (insn, REG_EQUAL, orig);
3534 return reg;
3535 }
3536 else if (GET_CODE (orig) == CONST)
3537 {
3538 rtx base, offset;
3539
3540 if (GET_CODE (XEXP (orig, 0)) == PLUS
3541 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3542 return orig;
3543
3544 if (reg == 0)
3545 {
3546 gcc_assert (! reload_in_progress && ! reload_completed);
3547 reg = gen_reg_rtx (Pmode);
3548 }
3549
3550 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3551 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3552 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3553 base == reg ? NULL_RTX : reg);
3554
3555 if (GET_CODE (offset) == CONST_INT)
3556 {
3557 if (SMALL_INT (offset))
3558 return plus_constant (base, INTVAL (offset));
3559 else if (! reload_in_progress && ! reload_completed)
3560 offset = force_reg (Pmode, offset);
3561 else
3562 /* If we reach here, then something is seriously wrong. */
3563 gcc_unreachable ();
3564 }
3565 return gen_rtx_PLUS (Pmode, base, offset);
3566 }
3567 else if (GET_CODE (orig) == LABEL_REF)
3568 /* ??? We ought to be checking that the register is live instead, in case
3569 it is eliminated. */
3570 crtl->uses_pic_offset_table = 1;
3571
3572 return orig;
3573 }
3574
3575 /* Try machine-dependent ways of modifying an illegitimate address X
3576 to be legitimate. If we find one, return the new, valid address.
3577
3578 OLDX is the address as it was before break_out_memory_refs was called.
3579 In some cases it is useful to look at this to decide what needs to be done.
3580
3581 MODE is the mode of the operand pointed to by X.
3582
3583 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3584
3585 static rtx
3586 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3587 enum machine_mode mode)
3588 {
3589 rtx orig_x = x;
3590
3591 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3592 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3593 force_operand (XEXP (x, 0), NULL_RTX));
3594 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3595 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3596 force_operand (XEXP (x, 1), NULL_RTX));
3597 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3598 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3599 XEXP (x, 1));
3600 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3601 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3602 force_operand (XEXP (x, 1), NULL_RTX));
3603
3604 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3605 return x;
3606
3607 if (sparc_tls_referenced_p (x))
3608 x = sparc_legitimize_tls_address (x);
3609 else if (flag_pic)
3610 x = sparc_legitimize_pic_address (x, NULL_RTX);
3611 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3612 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3613 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3614 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3615 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3616 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3617 else if (GET_CODE (x) == SYMBOL_REF
3618 || GET_CODE (x) == CONST
3619 || GET_CODE (x) == LABEL_REF)
3620 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3621
3622 return x;
3623 }
3624
3625 /* Delegitimize an address that was legitimized by the above function. */
3626
3627 static rtx
3628 sparc_delegitimize_address (rtx x)
3629 {
3630 x = delegitimize_mem_from_attrs (x);
3631
3632 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 1)) == UNSPEC)
3633 switch (XINT (XEXP (x, 1), 1))
3634 {
3635 case UNSPEC_MOVE_PIC:
3636 case UNSPEC_TLSLE:
3637 x = XVECEXP (XEXP (x, 1), 0, 0);
3638 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3639 break;
3640 default:
3641 break;
3642 }
3643
3644 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3645 if (GET_CODE (x) == MINUS
3646 && REG_P (XEXP (x, 0))
3647 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3648 && GET_CODE (XEXP (x, 1)) == LO_SUM
3649 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3650 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3651 {
3652 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3653 gcc_assert (GET_CODE (x) == LABEL_REF);
3654 }
3655
3656 return x;
3657 }
3658
3659 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3660 replace the input X, or the original X if no replacement is called for.
3661 The output parameter *WIN is 1 if the calling macro should goto WIN,
3662 0 if it should not.
3663
3664 For SPARC, we wish to handle addresses by splitting them into
3665 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3666 This cuts the number of extra insns by one.
3667
3668 Do nothing when generating PIC code and the address is a symbolic
3669 operand or requires a scratch register. */
3670
3671 rtx
3672 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3673 int opnum, int type,
3674 int ind_levels ATTRIBUTE_UNUSED, int *win)
3675 {
3676 /* Decompose SImode constants into HIGH+LO_SUM. */
3677 if (CONSTANT_P (x)
3678 && (mode != TFmode || TARGET_ARCH64)
3679 && GET_MODE (x) == SImode
3680 && GET_CODE (x) != LO_SUM
3681 && GET_CODE (x) != HIGH
3682 && sparc_cmodel <= CM_MEDLOW
3683 && !(flag_pic
3684 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3685 {
3686 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3687 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3688 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3689 opnum, (enum reload_type)type);
3690 *win = 1;
3691 return x;
3692 }
3693
3694 /* We have to recognize what we have already generated above. */
3695 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3696 {
3697 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3698 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3699 opnum, (enum reload_type)type);
3700 *win = 1;
3701 return x;
3702 }
3703
3704 *win = 0;
3705 return x;
3706 }
3707
3708 /* Return true if ADDR (a legitimate address expression)
3709 has an effect that depends on the machine mode it is used for.
3710
3711 In PIC mode,
3712
3713 (mem:HI [%l7+a])
3714
3715 is not equivalent to
3716
3717 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3718
3719 because [%l7+a+1] is interpreted as the address of (a+1). */
3720
3721
3722 static bool
3723 sparc_mode_dependent_address_p (const_rtx addr)
3724 {
3725 if (flag_pic && GET_CODE (addr) == PLUS)
3726 {
3727 rtx op0 = XEXP (addr, 0);
3728 rtx op1 = XEXP (addr, 1);
3729 if (op0 == pic_offset_table_rtx
3730 && symbolic_operand (op1, VOIDmode))
3731 return true;
3732 }
3733
3734 return false;
3735 }
3736
3737 #ifdef HAVE_GAS_HIDDEN
3738 # define USE_HIDDEN_LINKONCE 1
3739 #else
3740 # define USE_HIDDEN_LINKONCE 0
3741 #endif
3742
3743 static void
3744 get_pc_thunk_name (char name[32], unsigned int regno)
3745 {
3746 const char *reg_name = reg_names[regno];
3747
3748 /* Skip the leading '%' as that cannot be used in a
3749 symbol name. */
3750 reg_name += 1;
3751
3752 if (USE_HIDDEN_LINKONCE)
3753 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
3754 else
3755 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3756 }
3757
3758 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
3759
3760 static rtx
3761 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
3762 {
3763 int orig_flag_pic = flag_pic;
3764 rtx insn;
3765
3766 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
3767 flag_pic = 0;
3768 if (TARGET_ARCH64)
3769 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
3770 else
3771 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
3772 flag_pic = orig_flag_pic;
3773
3774 return insn;
3775 }
3776
3777 /* Emit code to load the GOT register. */
3778
3779 void
3780 load_got_register (void)
3781 {
3782 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
3783 if (!global_offset_table_rtx)
3784 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
3785
3786 if (TARGET_VXWORKS_RTP)
3787 emit_insn (gen_vxworks_load_got ());
3788 else
3789 {
3790 /* The GOT symbol is subject to a PC-relative relocation so we need a
3791 helper function to add the PC value and thus get the final value. */
3792 if (!got_helper_rtx)
3793 {
3794 char name[32];
3795 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
3796 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3797 }
3798
3799 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
3800 got_helper_rtx,
3801 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
3802 }
3803
3804 /* Need to emit this whether or not we obey regdecls,
3805 since setjmp/longjmp can cause life info to screw up.
3806 ??? In the case where we don't obey regdecls, this is not sufficient
3807 since we may not fall out the bottom. */
3808 emit_use (global_offset_table_rtx);
3809 }
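
/* For reference, the sequence emitted on 32-bit ELF is along the lines
   of (register and thunk names per get_pc_thunk_name above)

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   where the thunk merely executes `jmp %o7+8' with `add %o7, %l7, %l7'
   in its delay slot; the exact offsets are illustrative.  */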
3810
3811 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3812 address of the call target. */
3813
3814 void
3815 sparc_emit_call_insn (rtx pat, rtx addr)
3816 {
3817 rtx insn;
3818
3819 insn = emit_call_insn (pat);
3820
3821 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3822 if (TARGET_VXWORKS_RTP
3823 && flag_pic
3824 && GET_CODE (addr) == SYMBOL_REF
3825 && (SYMBOL_REF_DECL (addr)
3826 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3827 : !SYMBOL_REF_LOCAL_P (addr)))
3828 {
3829 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3830 crtl->uses_pic_offset_table = 1;
3831 }
3832 }
3833 \f
3834 /* Return 1 if RTX is a MEM which is known to be aligned to at
3835 least a DESIRED byte boundary. */
3836
3837 int
3838 mem_min_alignment (rtx mem, int desired)
3839 {
3840 rtx addr, base, offset;
3841
3842 /* If it's not a MEM we can't accept it. */
3843 if (GET_CODE (mem) != MEM)
3844 return 0;
3845
3846 /* Obviously... */
3847 if (!TARGET_UNALIGNED_DOUBLES
3848 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3849 return 1;
3850
3851 /* ??? The rest of the function predates MEM_ALIGN so
3852 there is probably a bit of redundancy. */
3853 addr = XEXP (mem, 0);
3854 base = offset = NULL_RTX;
3855 if (GET_CODE (addr) == PLUS)
3856 {
3857 if (GET_CODE (XEXP (addr, 0)) == REG)
3858 {
3859 base = XEXP (addr, 0);
3860
3861 /* What we are saying here is that if the base
3862 REG is aligned properly, the compiler will make
3863 sure any REG based index upon it will be so
3864 as well. */
3865 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3866 offset = XEXP (addr, 1);
3867 else
3868 offset = const0_rtx;
3869 }
3870 }
3871 else if (GET_CODE (addr) == REG)
3872 {
3873 base = addr;
3874 offset = const0_rtx;
3875 }
3876
3877 if (base != NULL_RTX)
3878 {
3879 int regno = REGNO (base);
3880
3881 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3882 {
3883 /* Check if the compiler has recorded some information
3884 about the alignment of the base REG. If reload has
3885 completed, we already matched with proper alignments.
3886 If not running global_alloc, reload might give us
3887 unaligned pointer to local stack though. */
3888 if (((cfun != 0
3889 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3890 || (optimize && reload_completed))
3891 && (INTVAL (offset) & (desired - 1)) == 0)
3892 return 1;
3893 }
3894 else
3895 {
3896 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3897 return 1;
3898 }
3899 }
3900 else if (! TARGET_UNALIGNED_DOUBLES
3901 || CONSTANT_P (addr)
3902 || GET_CODE (addr) == LO_SUM)
3903 {
3904 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3905 is true, in which case we can only assume that an access is aligned if
3906 it is to a constant address, or the address involves a LO_SUM. */
3907 return 1;
3908 }
3909
3910 /* An obviously unaligned address. */
3911 return 0;
3912 }
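
/* Worked example (illustrative): in 64-bit mode SPARC_STACK_BIAS is 2047,
   so a MEM at [%sp+2055] with DESIRED == 8 yields (2055 - 2047) & 7 == 0
   and is reported as double-word aligned, whereas [%sp+2051] yields
   (2051 - 2047) & 7 == 4 and falls through to the unaligned case.  */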
3913
3914 \f
3915 /* Vectors to keep interesting information about registers where it can easily
3916 be got. We used to use the actual mode value as the bit number, but there
3917 are more than 32 modes now. Instead we use two tables: one indexed by
3918 hard register number, and one indexed by mode. */
3919
3920 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3921 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3922 mapped into one sparc_mode_class mode. */
3923
3924 enum sparc_mode_class {
3925 S_MODE, D_MODE, T_MODE, O_MODE,
3926 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3927 CC_MODE, CCFP_MODE
3928 };
3929
3930 /* Modes for single-word and smaller quantities. */
3931 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3932
3933 /* Modes for double-word and smaller quantities. */
3934 #define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3935
3936 /* Modes for quad-word and smaller quantities. */
3937 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3938
3939 /* Modes for 8-word and smaller quantities. */
3940 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3941
3942 /* Modes for single-float quantities. We must allow any single word or
3943 smaller quantity. This is because the fix/float conversion instructions
3944 take integer inputs/outputs from the float registers. */
3945 #define SF_MODES (S_MODES)
3946
3947 /* Modes for double-float and smaller quantities. */
3948 #define DF_MODES (D_MODES)
3949
3950 /* Modes for quad-float and smaller quantities. */
3951 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3952
3953 /* Modes for quad-float pairs and smaller quantities. */
3954 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3955
3956 /* Modes for double-float only quantities. */
3957 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3958
3959 /* Modes for quad-float and double-float only quantities. */
3960 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3961
3962 /* Modes for quad-float pairs and double-float only quantities. */
3963 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3964
3965 /* Modes for condition codes. */
3966 #define CC_MODES (1 << (int) CC_MODE)
3967 #define CCFP_MODES (1 << (int) CCFP_MODE)
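
/* For concreteness, with the enum above (S_MODE = 0, ..., CCFP_MODE = 9)
   the masks work out to

     S_MODES = 0x011   D_MODES = 0x033   T_MODES = 0x077   O_MODES = 0x0ff
     SF_MODES = 0x011  DF_MODES = 0x033  TF_MODES = 0x073  OF_MODES = 0x0f3
     DF_MODES_NO_S = 0x022  TF_MODES_NO_S = 0x062  OF_MODES_NO_S = 0x0e2
     CC_MODES = 0x100  CCFP_MODES = 0x200

   so testing a mode against a register is a single AND of two masks.  */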
3968
3969 /* Value is 1 if register/mode pair is acceptable on sparc.
3970 The funny mixture of D and T modes is because integer operations
3971 do not specially operate on tetra quantities, so non-quad-aligned
3972 registers can hold quadword quantities (except %o4 and %i4 because
3973 they cross fixed registers). */
3974
3975 /* This points to either the 32 bit or the 64 bit version. */
3976 const int *hard_regno_mode_classes;
3977
3978 static const int hard_32bit_mode_classes[] = {
3979 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3980 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3981 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3982 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3983
3984 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3985 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3986 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3987 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3988
3989 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3990 and none can hold SFmode/SImode values. */
3991 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3992 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3993 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3994 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3995
3996 /* %fcc[0123] */
3997 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3998
3999 /* %icc */
4000 CC_MODES
4001 };
4002
4003 static const int hard_64bit_mode_classes[] = {
4004 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4005 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4006 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4007 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
4008
4009 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4010 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4011 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
4012 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
4013
4014 /* FP regs f32 to f63. Only the even numbered registers actually exist,
4015 and none can hold SFmode/SImode values. */
4016 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4017 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4018 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4019 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
4020
4021 /* %fcc[0123] */
4022 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
4023
4024 /* %icc */
4025 CC_MODES
4026 };
4027
4028 int sparc_mode_class [NUM_MACHINE_MODES];
4029
4030 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
4031
4032 static void
4033 sparc_init_modes (void)
4034 {
4035 int i;
4036
4037 for (i = 0; i < NUM_MACHINE_MODES; i++)
4038 {
4039 switch (GET_MODE_CLASS (i))
4040 {
4041 case MODE_INT:
4042 case MODE_PARTIAL_INT:
4043 case MODE_COMPLEX_INT:
4044 if (GET_MODE_SIZE (i) <= 4)
4045 sparc_mode_class[i] = 1 << (int) S_MODE;
4046 else if (GET_MODE_SIZE (i) == 8)
4047 sparc_mode_class[i] = 1 << (int) D_MODE;
4048 else if (GET_MODE_SIZE (i) == 16)
4049 sparc_mode_class[i] = 1 << (int) T_MODE;
4050 else if (GET_MODE_SIZE (i) == 32)
4051 sparc_mode_class[i] = 1 << (int) O_MODE;
4052 else
4053 sparc_mode_class[i] = 0;
4054 break;
4055 case MODE_VECTOR_INT:
4056 if (GET_MODE_SIZE (i) <= 4)
4057 sparc_mode_class[i] = 1 << (int)SF_MODE;
4058 else if (GET_MODE_SIZE (i) == 8)
4059 sparc_mode_class[i] = 1 << (int)DF_MODE;
4060 break;
4061 case MODE_FLOAT:
4062 case MODE_COMPLEX_FLOAT:
4063 if (GET_MODE_SIZE (i) <= 4)
4064 sparc_mode_class[i] = 1 << (int) SF_MODE;
4065 else if (GET_MODE_SIZE (i) == 8)
4066 sparc_mode_class[i] = 1 << (int) DF_MODE;
4067 else if (GET_MODE_SIZE (i) == 16)
4068 sparc_mode_class[i] = 1 << (int) TF_MODE;
4069 else if (GET_MODE_SIZE (i) == 32)
4070 sparc_mode_class[i] = 1 << (int) OF_MODE;
4071 else
4072 sparc_mode_class[i] = 0;
4073 break;
4074 case MODE_CC:
4075 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4076 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4077 else
4078 sparc_mode_class[i] = 1 << (int) CC_MODE;
4079 break;
4080 default:
4081 sparc_mode_class[i] = 0;
4082 break;
4083 }
4084 }
4085
4086 if (TARGET_ARCH64)
4087 hard_regno_mode_classes = hard_64bit_mode_classes;
4088 else
4089 hard_regno_mode_classes = hard_32bit_mode_classes;
4090
4091 /* Initialize the array used by REGNO_REG_CLASS. */
4092 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4093 {
4094 if (i < 16 && TARGET_V8PLUS)
4095 sparc_regno_reg_class[i] = I64_REGS;
4096 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4097 sparc_regno_reg_class[i] = GENERAL_REGS;
4098 else if (i < 64)
4099 sparc_regno_reg_class[i] = FP_REGS;
4100 else if (i < 96)
4101 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4102 else if (i < 100)
4103 sparc_regno_reg_class[i] = FPCC_REGS;
4104 else
4105 sparc_regno_reg_class[i] = NO_REGS;
4106 }
4107 }
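
/* The two tables are meant to be consumed together; a sketch of the test
   done by HARD_REGNO_MODE_OK in sparc.h (modulo its exact spelling) is

     (hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0

   e.g. DFmode (bit DF_MODE) is accepted by an even FP register whose
   entry is DF_MODES but rejected by an odd one whose entry is SF_MODES.  */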
4108 \f
4109 /* Return whether REGNO, a global or FP register, must be saved/restored. */
4110
4111 static inline bool
4112 save_global_or_fp_reg_p (unsigned int regno,
4113 int leaf_function ATTRIBUTE_UNUSED)
4114 {
4115 return !call_used_regs[regno] && df_regs_ever_live_p (regno);
4116 }
4117
4118 /* Return whether the return address register (%i7) is needed. */
4119
4120 static inline bool
4121 return_addr_reg_needed_p (int leaf_function)
4122 {
4123 /* If it is live, for example because of __builtin_return_address (0). */
4124 if (df_regs_ever_live_p (RETURN_ADDR_REGNUM))
4125 return true;
4126
4127 /* Otherwise, it is needed as save register if %o7 is clobbered. */
4128 if (!leaf_function
4129 /* Loading the GOT register clobbers %o7. */
4130 || crtl->uses_pic_offset_table
4131 || df_regs_ever_live_p (INCOMING_RETURN_ADDR_REGNUM))
4132 return true;
4133
4134 return false;
4135 }
4136
4137 /* Return whether REGNO, a local or in register, must be saved/restored. */
4138
4139 static bool
4140 save_local_or_in_reg_p (unsigned int regno, int leaf_function)
4141 {
4142 /* General case: call-saved registers live at some point. */
4143 if (!call_used_regs[regno] && df_regs_ever_live_p (regno))
4144 return true;
4145
4146 /* Frame pointer register (%fp) if needed. */
4147 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
4148 return true;
4149
4150 /* Return address register (%i7) if needed. */
4151 if (regno == RETURN_ADDR_REGNUM && return_addr_reg_needed_p (leaf_function))
4152 return true;
4153
4154 /* GOT register (%l7) if needed. */
4155 if (regno == PIC_OFFSET_TABLE_REGNUM && crtl->uses_pic_offset_table)
4156 return true;
4157
4158 /* If the function accesses prior frames, the frame pointer and the return
4159 address of the previous frame must be saved on the stack. */
4160 if (crtl->accesses_prior_frames
4161 && (regno == HARD_FRAME_POINTER_REGNUM || regno == RETURN_ADDR_REGNUM))
4162 return true;
4163
4164 return false;
4165 }
4166
4167 /* Compute the frame size required by the function. This function is called
4168 during the reload pass and also by sparc_expand_prologue. */
4169
4170 HOST_WIDE_INT
4171 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function)
4172 {
4173 HOST_WIDE_INT frame_size, apparent_frame_size;
4174 int args_size, n_global_fp_regs = 0;
4175 bool save_local_in_regs_p = false;
4176 unsigned int i;
4177
4178 /* If the function allocates dynamic stack space, the dynamic offset is
4179 computed early and contains REG_PARM_STACK_SPACE, so we need to cope. */
4180 if (leaf_function && !cfun->calls_alloca)
4181 args_size = 0;
4182 else
4183 args_size = crtl->outgoing_args_size + REG_PARM_STACK_SPACE (cfun->decl);
4184
4185 /* Calculate space needed for global registers. */
4186 if (TARGET_ARCH64)
4187 { /* Braces needed so the dangling else below binds to the arch test. */
4188 for (i = 0; i < 8; i++)
4189 if (save_global_or_fp_reg_p (i, 0)) n_global_fp_regs += 2;
4190 }
4191 else
4192 for (i = 0; i < 8; i += 2)
4193 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0)) n_global_fp_regs += 2;
4194
4195 /* In the flat window model, find out which local and in registers need to
4196 be saved. We don't reserve space in the current frame for them as they
4197 will be spilled into the register window save area of the caller's frame.
4198 However, as soon as we use this register window save area, we must create
4199 that of the current frame to make it the live one. */
4200 if (TARGET_FLAT)
4201 for (i = 16; i < 32; i++)
4202 if (save_local_or_in_reg_p (i, leaf_function))
4203 {
4204 save_local_in_regs_p = true;
4205 break;
4206 }
4207
4208 /* Calculate space needed for FP registers. */
4209 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4210 if (save_global_or_fp_reg_p (i, 0) || save_global_or_fp_reg_p (i + 1, 0))
4211 n_global_fp_regs += 2;
4212
4213 if (size == 0
4214 && n_global_fp_regs == 0
4215 && args_size == 0
4216 && !save_local_in_regs_p)
4217 frame_size = apparent_frame_size = 0;
4218 else
4219 {
4220 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4221 apparent_frame_size = (size - STARTING_FRAME_OFFSET + 7) & -8;
4222 apparent_frame_size += n_global_fp_regs * 4;
4223
4224 /* We need to add the size of the outgoing argument area. */
4225 frame_size = apparent_frame_size + ((args_size + 7) & -8);
4226
4227 /* And that of the register window save area. */
4228 frame_size += FIRST_PARM_OFFSET (cfun->decl);
4229
4230 /* Finally, bump to the appropriate alignment. */
4231 frame_size = SPARC_STACK_ALIGN (frame_size);
4232 }
4233
4234 /* Set up values for use in prologue and epilogue. */
4235 sparc_frame_size = frame_size;
4236 sparc_apparent_frame_size = apparent_frame_size;
4237 sparc_n_global_fp_regs = n_global_fp_regs;
4238 sparc_save_local_in_regs_p = save_local_in_regs_p;
4239
4240 return frame_size;
4241 }
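
/* Hypothetical walk-through: on 32-bit, 24 bytes of locals with no saved
   global/FP registers in a non-leaf function give roughly

     apparent_frame_size = (24 - STARTING_FRAME_OFFSET + 7) & -8
     frame_size = apparent_frame_size + ((args_size + 7) & -8)
		  + FIRST_PARM_OFFSET (cfun->decl)

   rounded up by SPARC_STACK_ALIGN; the actual constants depend on the
   ABI variant, so treat the numbers as illustrative only.  */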
4242
4243 /* Output any necessary .register pseudo-ops. */
4244
4245 void
4246 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4247 {
4248 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4249 int i;
4250
4251 if (TARGET_ARCH32)
4252 return;
4253
4254 /* Check if %g[2367] were used without
4255 .register being printed for them already. */
4256 for (i = 2; i < 8; i++)
4257 {
4258 if (df_regs_ever_live_p (i)
4259 && ! sparc_hard_reg_printed [i])
4260 {
4261 sparc_hard_reg_printed [i] = 1;
4262 /* %g7 is used as TLS base register, use #ignore
4263 for it instead of #scratch. */
4264 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4265 i == 7 ? "ignore" : "scratch");
4266 }
4267 if (i == 3) i = 5;
4268 }
4269 #endif
4270 }
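
/* For instance, a 64-bit function in which %g2 and %g7 are live emits

	.register	%g2, #scratch
	.register	%g7, #ignore

   (assuming the assembler supports the pseudo-op, per
   HAVE_AS_REGISTER_PSEUDO_OP).  */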
4271
4272 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4273
4274 #if PROBE_INTERVAL > 4096
4275 #error Cannot use indexed addressing mode for stack probing
4276 #endif
4277
4278 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4279 inclusive. These are offsets from the current stack pointer.
4280
4281 Note that we don't use the REG+REG addressing mode for the probes because
4282 of the stack bias in 64-bit mode. And it doesn't really buy us anything
4283 so the advantages of having a single code path win out here. */
4284
4285 static void
4286 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4287 {
4288 rtx g1 = gen_rtx_REG (Pmode, 1);
4289
4290 /* See if we have a constant small number of probes to generate. If so,
4291 that's the easy case. */
4292 if (size <= PROBE_INTERVAL)
4293 {
4294 emit_move_insn (g1, GEN_INT (first));
4295 emit_insn (gen_rtx_SET (VOIDmode, g1,
4296 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4297 emit_stack_probe (plus_constant (g1, -size));
4298 }
4299
4300 /* The run-time loop is made up of 10 insns in the generic case while the
4301 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4302 else if (size <= 5 * PROBE_INTERVAL)
4303 {
4304 HOST_WIDE_INT i;
4305
4306 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4307 emit_insn (gen_rtx_SET (VOIDmode, g1,
4308 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4309 emit_stack_probe (g1);
4310
4311 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4312 it exceeds SIZE. If only two probes are needed, this will not
4313 generate any code. Then probe at FIRST + SIZE. */
4314 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4315 {
4316 emit_insn (gen_rtx_SET (VOIDmode, g1,
4317 plus_constant (g1, -PROBE_INTERVAL)));
4318 emit_stack_probe (g1);
4319 }
4320
4321 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4322 }
4323
4324 /* Otherwise, do the same as above, but in a loop. Note that we must be
4325 extra careful with variables wrapping around because we might be at
4326 the very top (or the very bottom) of the address space and we have
4327 to be able to handle this case properly; in particular, we use an
4328 equality test for the loop condition. */
4329 else
4330 {
4331 HOST_WIDE_INT rounded_size;
4332 rtx g4 = gen_rtx_REG (Pmode, 4);
4333
4334 emit_move_insn (g1, GEN_INT (first));
4335
4336
4337 /* Step 1: round SIZE to the previous multiple of the interval. */
4338
4339 rounded_size = size & -PROBE_INTERVAL;
4340 emit_move_insn (g4, GEN_INT (rounded_size));
4341
4342
4343 /* Step 2: compute initial and final value of the loop counter. */
4344
4345 /* TEST_ADDR = SP + FIRST. */
4346 emit_insn (gen_rtx_SET (VOIDmode, g1,
4347 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4348
4349 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4350 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4351
4352
4353 /* Step 3: the loop
4354
4355 while (TEST_ADDR != LAST_ADDR)
4356 {
4357 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4358 probe at TEST_ADDR
4359 }
4360
4361 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4362 until it is equal to ROUNDED_SIZE. */
4363
4364 if (TARGET_ARCH64)
4365 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4366 else
4367 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4368
4369
4370 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4371 that SIZE is equal to ROUNDED_SIZE. */
4372
4373 if (size != rounded_size)
4374 emit_stack_probe (plus_constant (g4, rounded_size - size));
4375 }
4376
4377 /* Make sure nothing is scheduled before we are done. */
4378 emit_insn (gen_blockage ());
4379 }
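
/* Worked example (illustrative): with PROBE_INTERVAL == 4096, FIRST ==
   16384 and SIZE == 12288, the unrolled branch above probes the three
   addresses SP - 20480, SP - 24576 and SP - 28672, i.e. FIRST + 4096,
   FIRST + 8192 and FIRST + SIZE below the stack pointer, using %g1 as
   the only scratch register.  */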
4380
4381 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4382 absolute addresses. */
4383
4384 const char *
4385 output_probe_stack_range (rtx reg1, rtx reg2)
4386 {
4387 static int labelno = 0;
4388 char loop_lab[32], end_lab[32];
4389 rtx xops[2];
4390
4391 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4392 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4393
4394 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4395
4396 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4397 xops[0] = reg1;
4398 xops[1] = reg2;
4399 output_asm_insn ("cmp\t%0, %1", xops);
4400 if (TARGET_ARCH64)
4401 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4402 else
4403 fputs ("\tbe\t", asm_out_file);
4404 assemble_name_raw (asm_out_file, end_lab);
4405 fputc ('\n', asm_out_file);
4406
4407 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4408 xops[1] = GEN_INT (-PROBE_INTERVAL);
4409 output_asm_insn (" add\t%0, %1, %0", xops);
4410
4411 /* Probe at TEST_ADDR and branch. */
4412 if (TARGET_ARCH64)
4413 fputs ("\tba,pt\t%xcc,", asm_out_file);
4414 else
4415 fputs ("\tba\t", asm_out_file);
4416 assemble_name_raw (asm_out_file, loop_lab);
4417 fputc ('\n', asm_out_file);
4418 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4419 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4420
4421 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4422
4423 return "";
4424 }
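
/* The emitted loop looks roughly like this on 32-bit (labels and the
   probe offset are illustrative):

   .LPSRL0:
	cmp	%g1, %g4
	be	.LPSRE0
	 add	%g1, -4096, %g1
	ba	.LPSRL0
	 st	%g0, [%g1+0]
   .LPSRE0:

   with be,pn/ba,pt %xcc and a +2047 bias on the store in 64-bit mode.  */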
4425
4426 /* Emit code to save/restore registers from LOW to HIGH at BASE+OFFSET as
4427 needed. LOW is supposed to be double-word aligned for 32-bit registers.
4428 SAVE_P decides whether a register must be saved/restored. ACTION_TRUE
4429 is the action to be performed if SAVE_P returns true and ACTION_FALSE
4430 the action to be performed if it returns false. Return the new offset. */
4431
4432 typedef bool (*sorr_pred_t) (unsigned int, int);
4433 typedef enum { SORR_NONE, SORR_ADVANCE, SORR_SAVE, SORR_RESTORE } sorr_act_t;
4434
4435 static int
4436 emit_save_or_restore_regs (unsigned int low, unsigned int high, rtx base,
4437 int offset, int leaf_function, sorr_pred_t save_p,
4438 sorr_act_t action_true, sorr_act_t action_false)
4439 {
4440 unsigned int i;
4441 rtx mem, insn;
4442
4443 if (TARGET_ARCH64 && high <= 32)
4444 {
4445 int fp_offset = -1;
4446
4447 for (i = low; i < high; i++)
4448 {
4449 if (save_p (i, leaf_function))
4450 {
4451 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4452 if (action_true == SORR_SAVE)
4453 {
4454 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4455 RTX_FRAME_RELATED_P (insn) = 1;
4456 }
4457 else /* action_true == SORR_RESTORE */
4458 {
4459 /* The frame pointer must be restored last since its old
4460 value may be used as base address for the frame. This
4461 is problematic in 64-bit mode only because of the lack
4462 of a double-word load instruction. */
4463 if (i == HARD_FRAME_POINTER_REGNUM)
4464 fp_offset = offset;
4465 else
4466 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4467 }
4468 offset += 8;
4469 }
4470 else if (action_false == SORR_ADVANCE)
4471 offset += 8;
4472 }
4473
4474 if (fp_offset >= 0)
4475 {
4476 mem = gen_frame_mem (DImode, plus_constant (base, fp_offset));
4477 emit_move_insn (hard_frame_pointer_rtx, mem);
4478 }
4479 }
4480 else
4481 {
4482 for (i = low; i < high; i += 2)
4483 {
4484 bool reg0 = save_p (i, leaf_function);
4485 bool reg1 = save_p (i + 1, leaf_function);
4486 enum machine_mode mode;
4487 int regno;
4488
4489 if (reg0 && reg1)
4490 {
4491 mode = i < 32 ? DImode : DFmode;
4492 regno = i;
4493 }
4494 else if (reg0)
4495 {
4496 mode = i < 32 ? SImode : SFmode;
4497 regno = i;
4498 }
4499 else if (reg1)
4500 {
4501 mode = i < 32 ? SImode : SFmode;
4502 regno = i + 1;
4503 offset += 4;
4504 }
4505 else
4506 {
4507 if (action_false == SORR_ADVANCE)
4508 offset += 8;
4509 continue;
4510 }
4511
4512 mem = gen_frame_mem (mode, plus_constant (base, offset));
4513 if (action_true == SORR_SAVE)
4514 {
4515 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4516 RTX_FRAME_RELATED_P (insn) = 1;
4517 if (mode == DImode)
4518 {
4519 rtx set1, set2;
4520 mem = gen_frame_mem (SImode, plus_constant (base, offset));
4521 set1 = gen_rtx_SET (VOIDmode, mem,
4522 gen_rtx_REG (SImode, regno));
4523 RTX_FRAME_RELATED_P (set1) = 1;
4524 mem
4525 = gen_frame_mem (SImode, plus_constant (base, offset + 4));
4526 set2 = gen_rtx_SET (VOIDmode, mem,
4527 gen_rtx_REG (SImode, regno + 1));
4528 RTX_FRAME_RELATED_P (set2) = 1;
4529 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4530 gen_rtx_PARALLEL (VOIDmode,
4531 gen_rtvec (2, set1, set2)));
4532 }
4533 }
4534 else /* action_true == SORR_RESTORE */
4535 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4536
4537 /* Always preserve double-word alignment. */
4538 offset = (offset + 8) & -8;
4539 }
4540 }
4541
4542 return offset;
4543 }
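
/* E.g. in 32-bit mode, when %l0 and %l1 (regs 16 and 17) both need
   saving, a single DImode store covers the pair and the attached
   REG_FRAME_RELATED_EXPR note describes it to the unwinder as two
   SImode stores; if only %l1 needs saving, a lone SImode store at
   OFFSET + 4 is emitted instead.  */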
4544
4545 /* Emit code to adjust BASE to OFFSET. Return the new base. */
4546
4547 static rtx
4548 emit_adjust_base_to_offset (rtx base, int offset)
4549 {
4550 /* ??? This might be optimized a little as %g1 might already have a
4551 value close enough that a single add insn will do. */
4552 /* ??? Although, all of this is probably only a temporary fix because
4553 if %g1 can hold a function result, then sparc_expand_epilogue will
4554 lose (the result will be clobbered). */
4555 rtx new_base = gen_rtx_REG (Pmode, 1);
4556 emit_move_insn (new_base, GEN_INT (offset));
4557 emit_insn (gen_rtx_SET (VOIDmode,
4558 new_base, gen_rtx_PLUS (Pmode, base, new_base)));
4559 return new_base;
4560 }
4561
4562 /* Emit code to save/restore call-saved global and FP registers. */
4563
4564 static void
4565 emit_save_or_restore_global_fp_regs (rtx base, int offset, sorr_act_t action)
4566 {
4567 if (offset < -4096 || offset + sparc_n_global_fp_regs * 4 > 4095)
4568 {
4569 base = emit_adjust_base_to_offset (base, offset);
4570 offset = 0;
4571 }
4572
4573 offset
4574 = emit_save_or_restore_regs (0, 8, base, offset, 0,
4575 save_global_or_fp_reg_p, action, SORR_NONE);
4576 emit_save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, 0,
4577 save_global_or_fp_reg_p, action, SORR_NONE);
4578 }
4579
4580 /* Emit code to save/restore call-saved local and in registers. */
4581
4582 static void
4583 emit_save_or_restore_local_in_regs (rtx base, int offset, sorr_act_t action)
4584 {
4585 if (offset < -4096 || offset + 16 * UNITS_PER_WORD > 4095)
4586 {
4587 base = emit_adjust_base_to_offset (base, offset);
4588 offset = 0;
4589 }
4590
4591 emit_save_or_restore_regs (16, 32, base, offset, sparc_leaf_function_p,
4592 save_local_or_in_reg_p, action, SORR_ADVANCE);
4593 }
4594
4595 /* Emit a window_save insn. */
4596
4597 static rtx
4598 emit_window_save (rtx increment)
4599 {
4600 rtx insn = emit_insn (gen_window_save (increment));
4601 RTX_FRAME_RELATED_P (insn) = 1;
4602
4603 /* The incoming return address (%o7) is saved in %i7. */
4604 add_reg_note (insn, REG_CFA_REGISTER,
4605 gen_rtx_SET (VOIDmode,
4606 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM),
4607 gen_rtx_REG (Pmode,
4608 INCOMING_RETURN_ADDR_REGNUM)));
4609
4610 /* The window save event. */
4611 add_reg_note (insn, REG_CFA_WINDOW_SAVE, const0_rtx);
4612
4613 /* The CFA is %fp, the hard frame pointer. */
4614 add_reg_note (insn, REG_CFA_DEF_CFA,
4615 plus_constant (hard_frame_pointer_rtx,
4616 INCOMING_FRAME_SP_OFFSET));
4617
4618 return insn;
4619 }
4620
4621 /* Generate an increment for the stack pointer. */
4622
4623 static rtx
4624 gen_stack_pointer_inc (rtx increment)
4625 {
4626 return gen_rtx_SET (VOIDmode,
4627 stack_pointer_rtx,
4628 gen_rtx_PLUS (Pmode,
4629 stack_pointer_rtx,
4630 increment));
4631 }
4632
4633 /* Generate a decrement for the stack pointer. */
4634
4635 static rtx
4636 gen_stack_pointer_dec (rtx decrement)
4637 {
4638 return gen_rtx_SET (VOIDmode,
4639 stack_pointer_rtx,
4640 gen_rtx_MINUS (Pmode,
4641 stack_pointer_rtx,
4642 decrement));
4643 }
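
/* Both helpers simply wrap a SET of the stack pointer, e.g.
   gen_stack_pointer_inc (GEN_INT (-104)) builds

     (set (reg:SI 14 %sp) (plus:SI (reg:SI 14 %sp) (const_int -104)))

   (SImode shown for 32-bit; DImode in 64-bit mode).  */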
4644
4645 /* Expand the function prologue. The prologue is responsible for reserving
4646 storage for the frame, saving the call-saved registers and loading the
4647 GOT register if needed. */
4648
4649 void
4650 sparc_expand_prologue (void)
4651 {
4652 HOST_WIDE_INT size;
4653 rtx insn;
4654
4655 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4656 on the final value of the flag means deferring the prologue/epilogue
4657 expansion until just before the second scheduling pass, which is too
4658 late to emit multiple epilogues or return insns.
4659
4660 Of course we are making the assumption that the value of the flag
4661 will not change between now and its final value. Of the three parts
4662 of the formula, only the last one can reasonably vary. Let's take a
4663 closer look, after assuming that the first two ones are set to true
4664 (otherwise the last value is effectively silenced).
4665
4666 If only_leaf_regs_used returns false, the global predicate will also
4667 be false so the actual frame size calculated below will be positive.
4668 As a consequence, the save_register_window insn will be emitted in
4669 the instruction stream; now this insn explicitly references %fp
4670 which is not a leaf register so only_leaf_regs_used will always
4671 return false subsequently.
4672
4673 If only_leaf_regs_used returns true, we hope that the subsequent
4674 optimization passes won't cause non-leaf registers to pop up. For
4675 example, the regrename pass has special provisions to not rename to
4676 non-leaf registers in a leaf function. */
4677 sparc_leaf_function_p
4678 = optimize > 0 && current_function_is_leaf && only_leaf_regs_used ();
4679
4680 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4681
4682 if (flag_stack_usage_info)
4683 current_function_static_stack_size = size;
4684
4685 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4686 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4687
4688 if (size == 0)
4689 ; /* do nothing. */
4690 else if (sparc_leaf_function_p)
4691 {
4692 rtx size_int_rtx = GEN_INT (-size);
4693
4694 if (size <= 4096)
4695 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4696 else if (size <= 8192)
4697 {
4698 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4699 /* %sp is still the CFA register. */
4700 RTX_FRAME_RELATED_P (insn) = 1;
4701 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4702 }
4703 else
4704 {
4705 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4706 emit_move_insn (size_rtx, size_int_rtx);
4707 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4708 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4709 gen_stack_pointer_inc (size_int_rtx));
4710 }
4711
4712 RTX_FRAME_RELATED_P (insn) = 1;
4713 }
4714 else
4715 {
4716 rtx size_int_rtx = GEN_INT (-size);
4717
4718 if (size <= 4096)
4719 emit_window_save (size_int_rtx);
4720 else if (size <= 8192)
4721 {
4722 emit_window_save (GEN_INT (-4096));
4723 /* %sp is not the CFA register anymore. */
4724 emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4725 }
4726 else
4727 {
4728 rtx size_rtx = gen_rtx_REG (Pmode, 1);
4729 emit_move_insn (size_rtx, size_int_rtx);
4730 emit_window_save (size_rtx);
4731 }
4732 }
4733
4734 if (sparc_leaf_function_p)
4735 {
4736 sparc_frame_base_reg = stack_pointer_rtx;
4737 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4738 }
4739 else
4740 {
4741 sparc_frame_base_reg = hard_frame_pointer_rtx;
4742 sparc_frame_base_offset = SPARC_STACK_BIAS;
4743 }
4744
4745 if (sparc_n_global_fp_regs > 0)
4746 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4747 sparc_frame_base_offset
4748 - sparc_apparent_frame_size,
4749 SORR_SAVE);
4750
4751 /* Load the GOT register if needed. */
4752 if (crtl->uses_pic_offset_table)
4753 load_got_register ();
4754
4755 /* Advertise that the data calculated just above are now valid. */
4756 sparc_prologue_data_valid_p = true;
4757 }
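
/* For a small frame all of the above typically collapses into a single
   instruction, e.g. (illustrative 32-bit output)

	save	%sp, -104, %sp

   for a regular function, while a leaf function of the same size only
   emits `add %sp, -104, %sp' and keeps its caller's register window.  */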
4758
4759 /* Expand the function prologue in the flat window model. The prologue is
4760 responsible for reserving storage for the frame, saving the call-saved
4761 registers and loading the GOT register if needed. */
4762
4763 void
4764 sparc_flat_expand_prologue (void)
4765 {
4766 HOST_WIDE_INT size;
4767 rtx insn;
4768
4769 sparc_leaf_function_p = optimize > 0 && current_function_is_leaf;
4770
4771 size = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4772
4773 if (flag_stack_usage_info)
4774 current_function_static_stack_size = size;
4775
4776 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && size)
4777 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
4778
4779 if (sparc_save_local_in_regs_p)
4780 emit_save_or_restore_local_in_regs (stack_pointer_rtx, SPARC_STACK_BIAS,
4781 SORR_SAVE);
4782
4783 if (size == 0)
4784 ; /* do nothing. */
4785 else
4786 {
4787 rtx size_int_rtx, size_rtx;
4788
4789 size_rtx = size_int_rtx = GEN_INT (-size);
4790
4791 /* We establish the frame (i.e. decrement the stack pointer) first, even
4792 if we use a frame pointer, because we cannot clobber any call-saved
4793 registers, including the frame pointer, if we haven't created a new
4794 register save area, for the sake of compatibility with the ABI. */
4795 if (size <= 4096)
4796 insn = emit_insn (gen_stack_pointer_inc (size_int_rtx));
4797 else if (size <= 8192 && !frame_pointer_needed)
4798 {
4799 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4800 RTX_FRAME_RELATED_P (insn) = 1;
4801 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
4802 }
4803 else
4804 {
4805 size_rtx = gen_rtx_REG (Pmode, 1);
4806 emit_move_insn (size_rtx, size_int_rtx);
4807 insn = emit_insn (gen_stack_pointer_inc (size_rtx));
4808 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4809 gen_stack_pointer_inc (size_int_rtx));
4810 }
4811 RTX_FRAME_RELATED_P (insn) = 1;
4812
4813 /* Ensure nothing is scheduled until after the frame is established. */
4814 emit_insn (gen_blockage ());
4815
4816 if (frame_pointer_needed)
4817 {
4818 insn = emit_insn (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4819 gen_rtx_MINUS (Pmode,
4820 stack_pointer_rtx,
4821 size_rtx)));
4822 RTX_FRAME_RELATED_P (insn) = 1;
4823
4824 add_reg_note (insn, REG_CFA_ADJUST_CFA,
4825 gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
4826 plus_constant (stack_pointer_rtx,
4827 size)));
4828 }
4829
4830 if (return_addr_reg_needed_p (sparc_leaf_function_p))
4831 {
4832 rtx o7 = gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM);
4833 rtx i7 = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
4834
4835 insn = emit_move_insn (i7, o7);
4836 RTX_FRAME_RELATED_P (insn) = 1;
4837
4838 add_reg_note (insn, REG_CFA_REGISTER,
4839 gen_rtx_SET (VOIDmode, i7, o7));
4840
4841 /* Prevent this instruction from ever being considered dead,
4842 even if this function has no epilogue. */
4843 emit_insn (gen_rtx_USE (VOIDmode, i7));
4844 }
4845 }
4846
4847 if (frame_pointer_needed)
4848 {
4849 sparc_frame_base_reg = hard_frame_pointer_rtx;
4850 sparc_frame_base_offset = SPARC_STACK_BIAS;
4851 }
4852 else
4853 {
4854 sparc_frame_base_reg = stack_pointer_rtx;
4855 sparc_frame_base_offset = size + SPARC_STACK_BIAS;
4856 }
4857
4858 if (sparc_n_global_fp_regs > 0)
4859 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4860 sparc_frame_base_offset
4861 - sparc_apparent_frame_size,
4862 SORR_SAVE);
4863
4864 /* Load the GOT register if needed. */
4865 if (crtl->uses_pic_offset_table)
4866 load_got_register ();
4867
4868 /* Advertise that the data calculated just above are now valid. */
4869 sparc_prologue_data_valid_p = true;
4870 }
4871
4872 /* This function generates the assembly code for function entry, which boils
4873 down to emitting the necessary .register directives. */
4874
4875 static void
4876 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4877 {
4878 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4879 if (!TARGET_FLAT)
4880 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4881
4882 sparc_output_scratch_registers (file);
4883 }
4884
4885 /* Expand the function epilogue, either normal or part of a sibcall.
4886 We emit all the instructions except the return or the call. */
4887
4888 void
4889 sparc_expand_epilogue (bool for_eh)
4890 {
4891 HOST_WIDE_INT size = sparc_frame_size;
4892
4893 if (sparc_n_global_fp_regs > 0)
4894 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4895 sparc_frame_base_offset
4896 - sparc_apparent_frame_size,
4897 SORR_RESTORE);
4898
4899 if (size == 0 || for_eh)
4900 ; /* do nothing. */
4901 else if (sparc_leaf_function_p)
4902 {
4903 if (size <= 4096)
4904 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4905 else if (size <= 8192)
4906 {
4907 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4908 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
4909 }
4910 else
4911 {
4912 rtx reg = gen_rtx_REG (Pmode, 1);
4913 emit_move_insn (reg, GEN_INT (-size));
4914 emit_insn (gen_stack_pointer_dec (reg));
4915 }
4916 }
4917 }
4918
4919 /* Expand the function epilogue in the flat window model, either normal
4920 or part of a sibcall. We emit all the instructions except the return or the call. */
4921
4922 void
4923 sparc_flat_expand_epilogue (bool for_eh)
4924 {
4925 HOST_WIDE_INT size = sparc_frame_size;
4926
4927 if (sparc_n_global_fp_regs > 0)
4928 emit_save_or_restore_global_fp_regs (sparc_frame_base_reg,
4929 sparc_frame_base_offset
4930 - sparc_apparent_frame_size,
4931 SORR_RESTORE);
4932
4933 /* If we have a frame pointer, we'll need both to restore it before the
4934 frame is destroyed and use its current value in destroying the frame.
4935 Since we don't have an atomic way to do that in the flat window model,
4936 we save the current value into a temporary register (%g1). */
4937 if (frame_pointer_needed && !for_eh)
4938 emit_move_insn (gen_rtx_REG (Pmode, 1), hard_frame_pointer_rtx);
4939
4940 if (return_addr_reg_needed_p (sparc_leaf_function_p))
4941 emit_move_insn (gen_rtx_REG (Pmode, INCOMING_RETURN_ADDR_REGNUM),
4942 gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM));
4943
4944 if (sparc_save_local_in_regs_p)
4945 emit_save_or_restore_local_in_regs (sparc_frame_base_reg,
4946 sparc_frame_base_offset,
4947 SORR_RESTORE);
4948
4949 if (size == 0 || for_eh)
4950 ; /* do nothing. */
4951 else if (frame_pointer_needed)
4952 {
4953 /* Make sure the frame is destroyed after everything else is done. */
4954 emit_insn (gen_blockage ());
4955
4956 emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
4957 }
4958 else
4959 {
4960 /* Likewise. */
4961 emit_insn (gen_blockage ());
4962
4963 if (size <= 4096)
4964 emit_insn (gen_stack_pointer_dec (GEN_INT (-size)));
4965 else if (size <= 8192)
4966 {
4967 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4968 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - size)));
4969 }
4970 else
4971 {
4972 rtx reg = gen_rtx_REG (Pmode, 1);
4973 emit_move_insn (reg, GEN_INT (-size));
4974 emit_insn (gen_stack_pointer_dec (reg));
4975 }
4976 }
4977 }
4978
4979 /* Return true if it is appropriate to emit `return' instructions in the
4980 body of a function. */
4981
4982 bool
4983 sparc_can_use_return_insn_p (void)
4984 {
4985 return sparc_prologue_data_valid_p
4986 && sparc_n_global_fp_regs == 0
4987 && (TARGET_FLAT
4988 ? (sparc_frame_size == 0 && !sparc_save_local_in_regs_p)
4989 : (sparc_frame_size == 0 || !sparc_leaf_function_p));
4990 }
4991
4992 /* This function generates the assembly code for function exit. */
4993
4994 static void
4995 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4996 {
4997 /* If the last two instructions of a function are "call foo; dslot;"
4998 the return address might point to the first instruction in the next
4999 function and we have to output a dummy nop for the sake of sane
5000 backtraces in such cases. This is pointless for sibling calls since
5001 the return address is explicitly adjusted. */
5002
5003 rtx insn, last_real_insn;
5004
5005 insn = get_last_insn ();
5006
5007 last_real_insn = prev_real_insn (insn);
5008 if (last_real_insn
5009 && GET_CODE (last_real_insn) == INSN
5010 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
5011 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
5012
5013 if (last_real_insn
5014 && CALL_P (last_real_insn)
5015 && !SIBLING_CALL_P (last_real_insn))
5016 fputs("\tnop\n", file);
5017
5018 sparc_output_deferred_case_vectors ();
5019 }
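
/* E.g. a function whose body ends with `call foo' plus its delay slot
   gets a trailing `nop' here, so that the return address %o7+8 still
   points inside the calling function.  */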
5020
5021 /* Output a 'restore' instruction. */
5022
5023 static void
5024 output_restore (rtx pat)
5025 {
5026 rtx operands[3];
5027
5028 if (! pat)
5029 {
5030 fputs ("\t restore\n", asm_out_file);
5031 return;
5032 }
5033
5034 gcc_assert (GET_CODE (pat) == SET);
5035
5036 operands[0] = SET_DEST (pat);
5037 pat = SET_SRC (pat);
5038
5039 switch (GET_CODE (pat))
5040 {
5041 case PLUS:
5042 operands[1] = XEXP (pat, 0);
5043 operands[2] = XEXP (pat, 1);
5044 output_asm_insn (" restore %r1, %2, %Y0", operands);
5045 break;
5046 case LO_SUM:
5047 operands[1] = XEXP (pat, 0);
5048 operands[2] = XEXP (pat, 1);
5049 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
5050 break;
5051 case ASHIFT:
5052 operands[1] = XEXP (pat, 0);
5053 gcc_assert (XEXP (pat, 1) == const1_rtx);
5054 output_asm_insn (" restore %r1, %r1, %Y0", operands);
5055 break;
5056 default:
5057 operands[1] = pat;
5058 output_asm_insn (" restore %%g0, %1, %Y0", operands);
5059 break;
5060 }
5061 }
5062
5063 /* Output a return. */
5064
5065 const char *
5066 output_return (rtx insn)
5067 {
5068 if (crtl->calls_eh_return)
5069 {
5070 /* If the function uses __builtin_eh_return, the eh_return
5071 machinery occupies the delay slot. */
5072 gcc_assert (!final_sequence);
5073
5074 if (flag_delayed_branch)
5075 {
5076 if (!TARGET_FLAT && TARGET_V9)
5077 fputs ("\treturn\t%i7+8\n", asm_out_file);
5078 else
5079 {
5080 if (!TARGET_FLAT)
5081 fputs ("\trestore\n", asm_out_file);
5082
5083 fputs ("\tjmp\t%o7+8\n", asm_out_file);
5084 }
5085
5086 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
5087 }
5088 else
5089 {
5090 if (!TARGET_FLAT)
5091 fputs ("\trestore\n", asm_out_file);
5092
5093 fputs ("\tadd\t%sp, %g1, %sp\n", asm_out_file);
5094 fputs ("\tjmp\t%o7+8\n\t nop\n", asm_out_file);
5095 }
5096 }
5097 else if (sparc_leaf_function_p || TARGET_FLAT)
5098 {
5099 /* This is a leaf or flat function so we don't have to bother restoring
5100 the register window, which frees us from dealing with the convoluted
5101 semantics of restore/return. We simply output the jump to the
5102 return address and the insn in the delay slot (if any). */
5103
5104 return "jmp\t%%o7+%)%#";
5105 }
5106 else
5107 {
5108 /* This is a regular function so we have to restore the register window.
5109 We may have a pending insn for the delay slot, which will be either
5110 combined with the 'restore' instruction or put in the delay slot of
5111 the 'return' instruction. */
5112
5113 if (final_sequence)
5114 {
5115 rtx delay, pat;
5116
5117 delay = NEXT_INSN (insn);
5118 gcc_assert (delay);
5119
5120 pat = PATTERN (delay);
5121
5122 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
5123 {
5124 epilogue_renumber (&pat, 0);
5125 return "return\t%%i7+%)%#";
5126 }
5127 else
5128 {
5129 output_asm_insn ("jmp\t%%i7+%)", NULL);
5130 output_restore (pat);
5131 PATTERN (delay) = gen_blockage ();
5132 INSN_CODE (delay) = -1;
5133 }
5134 }
5135 else
5136 {
5137 /* The delay slot is empty. */
5138 if (TARGET_V9)
5139 return "return\t%%i7+%)\n\t nop";
5140 else if (flag_delayed_branch)
5141 return "jmp\t%%i7+%)\n\t restore";
5142 else
5143 return "restore\n\tjmp\t%%o7+%)\n\t nop";
5144 }
5145 }
5146
5147 return "";
5148 }
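
/* The strings returned above correspond to assembly along these lines
   (illustrative; %) expands to the return offset, %# to a delay-slot
   nop when no insn fills the slot):

	jmp	%o7+8		! leaf or flat function
	 nop

	return	%i7+8		! V9 regular function
	 nop

	jmp	%i7+8		! pre-V9 regular function
	 restore  */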
5149
5150 /* Output a sibling call. */
5151
5152 const char *
5153 output_sibcall (rtx insn, rtx call_operand)
5154 {
5155 rtx operands[1];
5156
5157 gcc_assert (flag_delayed_branch);
5158
5159 operands[0] = call_operand;
5160
5161 if (sparc_leaf_function_p || TARGET_FLAT)
5162 {
5163 /* This is a leaf or flat function so we don't have to bother restoring
5164 the register window. We simply output the jump to the function and
5165 the insn in the delay slot (if any). */
5166
5167 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
5168
5169 if (final_sequence)
5170 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
5171 operands);
5172 else
5173 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
5174 it into a branch if possible. */
5175 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
5176 operands);
5177 }
5178 else
5179 {
5180 /* This is a regular function so we have to restore the register window.
5181 We may have a pending insn for the delay slot, which will be combined
5182 with the 'restore' instruction. */
5183
5184 output_asm_insn ("call\t%a0, 0", operands);
5185
5186 if (final_sequence)
5187 {
5188 rtx delay = NEXT_INSN (insn);
5189 gcc_assert (delay);
5190
5191 output_restore (PATTERN (delay));
5192
5193 PATTERN (delay) = gen_blockage ();
5194 INSN_CODE (delay) = -1;
5195 }
5196 else
5197 output_restore (NULL_RTX);
5198 }
5199
5200 return "";
5201 }
5202 \f
5203 /* Functions for handling argument passing.
5204
5205 For 32-bit, the first 6 args are normally in registers and the rest are
5206 pushed. Any arg that starts within the first 6 words is at least
5207 partially passed in a register unless its data type forbids.
5208
5209 For 64-bit, the argument registers are laid out as an array of 16 elements
5210 and arguments are added sequentially. The first 6 int args and up to the
5211 first 16 fp args (depending on size) are passed in regs.
5212
5213 Slot Stack Integral Float Float in structure Double Long Double
5214 ---- ----- -------- ----- ------------------ ------ -----------
5215 15 [SP+248] %f31 %f30,%f31 %d30
5216 14 [SP+240] %f29 %f28,%f29 %d28 %q28
5217 13 [SP+232] %f27 %f26,%f27 %d26
5218 12 [SP+224] %f25 %f24,%f25 %d24 %q24
5219 11 [SP+216] %f23 %f22,%f23 %d22
5220 10 [SP+208] %f21 %f20,%f21 %d20 %q20
5221 9 [SP+200] %f19 %f18,%f19 %d18
5222 8 [SP+192] %f17 %f16,%f17 %d16 %q16
5223 7 [SP+184] %f15 %f14,%f15 %d14
5224 6 [SP+176] %f13 %f12,%f13 %d12 %q12
5225 5 [SP+168] %o5 %f11 %f10,%f11 %d10
5226 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
5227 3 [SP+152] %o3 %f7 %f6,%f7 %d6
5228 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
5229 1 [SP+136] %o1 %f3 %f2,%f3 %d2
5230 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
5231
5232 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
5233
5234 Integral arguments are always passed as 64-bit quantities appropriately
5235 extended.
5236
5237 Passing of floating point values is handled as follows.
5238 If a prototype is in scope:
5239 If the value is in a named argument (i.e. not a stdarg function or a
5240 value not part of the `...') then the value is passed in the appropriate
5241 fp reg.
5242 If the value is part of the `...' and is passed in one of the first 6
5243 slots then the value is passed in the appropriate int reg.
5244 If the value is part of the `...' and is not passed in one of the first 6
5245 slots then the value is passed in memory.
5246 If a prototype is not in scope:
5247 If the value is one of the first 6 arguments the value is passed in the
5248 appropriate integer reg and the appropriate fp reg.
5249 If the value is not one of the first 6 arguments the value is passed in
5250 the appropriate fp reg and in memory.
5251
5252
5253 Summary of the calling conventions implemented by GCC on the SPARC:
5254
5255 32-bit ABI:
5256 size argument return value
5257
5258 small integer <4 int. reg. int. reg.
5259 word 4 int. reg. int. reg.
5260 double word 8 int. reg. int. reg.
5261
5262 _Complex small integer <8 int. reg. int. reg.
5263 _Complex word 8 int. reg. int. reg.
5264 _Complex double word 16 memory int. reg.
5265
5266 vector integer <=8 int. reg. FP reg.
5267 vector integer >8 memory memory
5268
5269 float 4 int. reg. FP reg.
5270 double 8 int. reg. FP reg.
5271 long double 16 memory memory
5272
5273 _Complex float 8 memory FP reg.
5274 _Complex double 16 memory FP reg.
5275 _Complex long double 32 memory FP reg.
5276
5277 vector float any memory memory
5278
5279 aggregate any memory memory
5280
5281
5282
5283 64-bit ABI:
5284 size argument return value
5285
5286 small integer <8 int. reg. int. reg.
5287 word 8 int. reg. int. reg.
5288 double word 16 int. reg. int. reg.
5289
5290 _Complex small integer <16 int. reg. int. reg.
5291 _Complex word 16 int. reg. int. reg.
5292 _Complex double word 32 memory int. reg.
5293
5294 vector integer <=16 FP reg. FP reg.
5295 vector integer 16<s<=32 memory FP reg.
5296 vector integer >32 memory memory
5297
5298 float 4 FP reg. FP reg.
5299 double 8 FP reg. FP reg.
5300 long double 16 FP reg. FP reg.
5301
5302 _Complex float 8 FP reg. FP reg.
5303 _Complex double 16 FP reg. FP reg.
5304 _Complex long double 32 memory FP reg.
5305
5306 vector float <=16 FP reg. FP reg.
5307 vector float 16<s<=32 memory FP reg.
5308 vector float >32 memory memory
5309
5310 aggregate <=16 reg. reg.
5311 aggregate 16<s<=32 memory reg.
5312 aggregate >32 memory memory
5313
5314
5315
5316 Note #1: complex floating-point types follow the extended SPARC ABIs as
5317 implemented by the Sun compiler.
5318
5319 Note #2: integral vector types follow the scalar floating-point types
5320 conventions to match what is implemented by the Sun VIS SDK.
5321
5322 Note #3: floating-point vector types follow the aggregate types
5323 conventions. */
5324
5325
5326 /* Maximum number of int regs for args. */
5327 #define SPARC_INT_ARG_MAX 6
5328 /* Maximum number of fp regs for args. */
5329 #define SPARC_FP_ARG_MAX 16
5330
5331 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
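
/* E.g. ROUND_ADVANCE of a 13-byte aggregate on 32-bit is (13 + 3) / 4,
   i.e. 4 argument words.  */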
5332
5333 /* Handle the INIT_CUMULATIVE_ARGS macro.
5334 Initialize a variable CUM of type CUMULATIVE_ARGS
5335 for a call to a function whose data type is FNTYPE.
5336 For a library call, FNTYPE is 0. */
5337
5338 void
5339 init_cumulative_args (struct sparc_args *cum, tree fntype,
5340 rtx libname ATTRIBUTE_UNUSED,
5341 tree fndecl ATTRIBUTE_UNUSED)
5342 {
5343 cum->words = 0;
5344 cum->prototype_p = fntype && prototype_p (fntype);
5345 cum->libcall_p = fntype == 0;
5346 }
5347
5348 /* Handle promotion of pointer and integer arguments. */
5349
5350 static enum machine_mode
5351 sparc_promote_function_mode (const_tree type,
5352 enum machine_mode mode,
5353 int *punsignedp,
5354 const_tree fntype ATTRIBUTE_UNUSED,
5355 int for_return ATTRIBUTE_UNUSED)
5356 {
5357 if (type != NULL_TREE && POINTER_TYPE_P (type))
5358 {
5359 *punsignedp = POINTERS_EXTEND_UNSIGNED;
5360 return Pmode;
5361 }
5362
5363 /* Integral arguments are passed as full words, as per the ABI. */
5364 if (GET_MODE_CLASS (mode) == MODE_INT
5365 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5366 return word_mode;
5367
5368 return mode;
5369 }
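
/* Illustrative effect: a `short' argument is widened to word_mode
   (SImode in 32-bit, DImode in 64-bit mode) keeping its signedness,
   while a pointer is widened to Pmode with the signedness dictated by
   POINTERS_EXTEND_UNSIGNED.  */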
5370
5371 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
5372
5373 static bool
5374 sparc_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
5375 {
5376 return TARGET_ARCH64 ? true : false;
5377 }
5378
5379 /* Scan the record type TYPE and return the following predicates:
5380 - INTREGS_P: the record contains at least one field or sub-field
5381 that is eligible for promotion in integer registers.
5382 - FP_REGS_P: the record contains at least one field or sub-field
5383 that is eligible for promotion in floating-point registers.
5384 - PACKED_P: the record contains at least one field that is packed.
5385
5386 Sub-fields are not taken into account for the PACKED_P predicate. */
5387
5388 static void
5389 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5390 int *packed_p)
5391 {
5392 tree field;
5393
5394 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5395 {
5396 if (TREE_CODE (field) == FIELD_DECL)
5397 {
5398 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5399 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5400 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5401 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5402 && TARGET_FPU)
5403 *fpregs_p = 1;
5404 else
5405 *intregs_p = 1;
5406
5407 if (packed_p && DECL_PACKED (field))
5408 *packed_p = 1;
5409 }
5410 }
5411 }
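
/* Example classifications (hypothetical types, assuming TARGET_FPU):

     struct s { int i; float f; };  ->  *intregs_p = 1, *fpregs_p = 1
     struct t { double d; };        ->  *fpregs_p = 1
     struct u { char c; } with a packed field
				    ->  *intregs_p = 1, *packed_p = 1

   without TARGET_FPU, floating-point fields count as integer fields.  */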
5412
5413 /* Compute the slot number to pass an argument in.
5414 Return the slot number or -1 if passing on the stack.
5415
5416 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5417 the preceding args and about the function being called.
5418 MODE is the argument's machine mode.
5419 TYPE is the data type of the argument (as a tree).
5420 This is null for libcalls where that information may
5421 not be available.
5422 NAMED is nonzero if this argument is a named parameter
5423 (otherwise it is an extra parameter matching an ellipsis).
5424 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5425 *PREGNO records the register number to use if scalar type.
5426 *PPADDING records the amount of padding needed in words. */
5427
5428 static int
5429 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5430 const_tree type, bool named, bool incoming_p,
5431 int *pregno, int *ppadding)
5432 {
5433 int regbase = (incoming_p
5434 ? SPARC_INCOMING_INT_ARG_FIRST
5435 : SPARC_OUTGOING_INT_ARG_FIRST);
5436 int slotno = cum->words;
5437 enum mode_class mclass;
5438 int regno;
5439
5440 *ppadding = 0;
5441
5442 if (type && TREE_ADDRESSABLE (type))
5443 return -1;
5444
5445 if (TARGET_ARCH32
5446 && mode == BLKmode
5447 && type
5448 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5449 return -1;
5450
5451 /* For SPARC64, objects requiring 16-byte alignment get it. */
5452 if (TARGET_ARCH64
5453 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5454 && (slotno & 1) != 0)
5455 slotno++, *ppadding = 1;
5456
5457 mclass = GET_MODE_CLASS (mode);
5458 if (type && TREE_CODE (type) == VECTOR_TYPE)
5459 {
5460 /* Vector types deserve special treatment because they are
5461 polymorphic wrt their mode, depending upon whether VIS
5462 instructions are enabled. */
5463 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5464 {
5465 /* The SPARC port defines no floating-point vector modes. */
5466 gcc_assert (mode == BLKmode);
5467 }
5468 else
5469 {
5470 /* Integral vector types should either have a vector
5471 mode or an integral mode, because we are guaranteed
5472 by pass_by_reference that their size is not greater
5473 than 16 bytes and TImode is 16-byte wide. */
5474 gcc_assert (mode != BLKmode);
5475
5476 /* Vector integers are handled like floats according to
5477 the Sun VIS SDK. */
5478 mclass = MODE_FLOAT;
5479 }
5480 }
5481
5482 switch (mclass)
5483 {
5484 case MODE_FLOAT:
5485 case MODE_COMPLEX_FLOAT:
5486 case MODE_VECTOR_INT:
5487 if (TARGET_ARCH64 && TARGET_FPU && named)
5488 {
5489 if (slotno >= SPARC_FP_ARG_MAX)
5490 return -1;
5491 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5492 /* Arguments filling only a single FP register are
5493 right-justified in the outer double FP register. */
5494 if (GET_MODE_SIZE (mode) <= 4)
5495 regno++;
5496 break;
5497 }
5498 /* fallthrough */
5499
5500 case MODE_INT:
5501 case MODE_COMPLEX_INT:
5502 if (slotno >= SPARC_INT_ARG_MAX)
5503 return -1;
5504 regno = regbase + slotno;
5505 break;
5506
5507 case MODE_RANDOM:
5508 if (mode == VOIDmode)
5509 /* MODE is VOIDmode when generating the actual call. */
5510 return -1;
5511
5512 gcc_assert (mode == BLKmode);
5513
5514 if (TARGET_ARCH32
5515 || !type
5516 || (TREE_CODE (type) != VECTOR_TYPE
5517 && TREE_CODE (type) != RECORD_TYPE))
5518 {
5519 if (slotno >= SPARC_INT_ARG_MAX)
5520 return -1;
5521 regno = regbase + slotno;
5522 }
5523 else /* TARGET_ARCH64 && type */
5524 {
5525 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5526
5527 /* First see what kinds of registers we would need. */
5528 if (TREE_CODE (type) == VECTOR_TYPE)
5529 fpregs_p = 1;
5530 else
5531 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5532
5533 /* The ABI obviously doesn't specify how packed structures
5534 are passed. These are defined to be passed in int regs
5535 if possible, otherwise memory. */
5536 if (packed_p || !named)
5537 fpregs_p = 0, intregs_p = 1;
5538
5539 /* If all arg slots are filled, then must pass on stack. */
5540 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5541 return -1;
5542
5543 /* If there are only int args and all int arg slots are filled,
5544 then must pass on stack. */
5545 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5546 return -1;
5547
5548 /* Note that even if all int arg slots are filled, fp members may
5549 still be passed in regs if such regs are available.
5550 *PREGNO isn't set because there may be more than one, it's up
5551 to the caller to compute them. */
5552 return slotno;
5553 }
5554 break;
5555
5556 default:
5557 gcc_unreachable ();
5558 }
5559
5560 *pregno = regno;
5561 return slotno;
5562 }
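
/* For instance, on TARGET_ARCH64 with TARGET_FPU, a named double
   argument arriving after two integer words (CUM->words == 2) gets
   slot 2 and *PREGNO = SPARC_FP_ARG_FIRST + 4, per the MODE_FLOAT
   case above; no padding is needed since DFmode only requires
   8-byte alignment. */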
5563
5564 /* Handle recursive register counting for structure field layout. */
5565
5566 struct function_arg_record_value_parms
5567 {
5568 rtx ret; /* return expression being built. */
5569 int slotno; /* slot number of the argument. */
5570 int named; /* whether the argument is named. */
5571 int regbase; /* regno of the base register. */
5572 int stack; /* 1 if part of the argument is on the stack. */
5573 int intoffset; /* offset of the first pending integer field. */
5574 unsigned int nregs; /* number of words passed in registers. */
5575 };
5576
5577 static void function_arg_record_value_3
5578 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5579 static void function_arg_record_value_2
5580 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5581 static void function_arg_record_value_1
5582 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5583 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5584 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5585
5586 /* A subroutine of function_arg_record_value. Traverse the structure
5587 recursively and determine how many registers will be required. */
5588
5589 static void
5590 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5591 struct function_arg_record_value_parms *parms,
5592 bool packed_p)
5593 {
5594 tree field;
5595
5596 /* We need to compute how many registers are needed so we can
5597 allocate the PARALLEL, but before we can do that we need to know
5598 whether there are any packed fields. The ABI obviously doesn't
5599 specify how structures are passed in this case, so they are
5600 defined to be passed in int regs if possible, otherwise memory,
5601 regardless of whether there are fp values present. */
5602
5603 if (! packed_p)
5604 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5605 {
5606 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5607 {
5608 packed_p = true;
5609 break;
5610 }
5611 }
5612
5613 /* Compute how many registers we need. */
5614 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5615 {
5616 if (TREE_CODE (field) == FIELD_DECL)
5617 {
5618 HOST_WIDE_INT bitpos = startbitpos;
5619
5620 if (DECL_SIZE (field) != 0)
5621 {
5622 if (integer_zerop (DECL_SIZE (field)))
5623 continue;
5624
5625 if (host_integerp (bit_position (field), 1))
5626 bitpos += int_bit_position (field);
5627 }
5628
5629 /* ??? FIXME: else assume zero offset. */
5630
5631 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5632 function_arg_record_value_1 (TREE_TYPE (field),
5633 bitpos,
5634 parms,
5635 packed_p);
5636 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5637 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5638 && TARGET_FPU
5639 && parms->named
5640 && ! packed_p)
5641 {
5642 if (parms->intoffset != -1)
5643 {
5644 unsigned int startbit, endbit;
5645 int intslots, this_slotno;
5646
5647 startbit = parms->intoffset & -BITS_PER_WORD;
5648 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5649
5650 intslots = (endbit - startbit) / BITS_PER_WORD;
5651 this_slotno = parms->slotno + parms->intoffset
5652 / BITS_PER_WORD;
5653
5654 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5655 {
5656 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5657 /* We need to pass this field on the stack. */
5658 parms->stack = 1;
5659 }
5660
5661 parms->nregs += intslots;
5662 parms->intoffset = -1;
5663 }
5664
5665 /* There's no need to check this_slotno < SPARC_FP_ARG_MAX;
5666 if it weren't true, we wouldn't be here. */
5667 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5668 && DECL_MODE (field) == BLKmode)
5669 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5670 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5671 parms->nregs += 2;
5672 else
5673 parms->nregs += 1;
5674 }
5675 else
5676 {
5677 if (parms->intoffset == -1)
5678 parms->intoffset = bitpos;
5679 }
5680 }
5681 }
5682 }
5683
5684 /* A subroutine of function_arg_record_value. Assign the bits of the
5685 structure between parms->intoffset and bitpos to integer registers. */
5686
5687 static void
5688 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5689 struct function_arg_record_value_parms *parms)
5690 {
5691 enum machine_mode mode;
5692 unsigned int regno;
5693 unsigned int startbit, endbit;
5694 int this_slotno, intslots, intoffset;
5695 rtx reg;
5696
5697 if (parms->intoffset == -1)
5698 return;
5699
5700 intoffset = parms->intoffset;
5701 parms->intoffset = -1;
5702
5703 startbit = intoffset & -BITS_PER_WORD;
5704 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5705 intslots = (endbit - startbit) / BITS_PER_WORD;
5706 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5707
5708 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5709 if (intslots <= 0)
5710 return;
5711
5712 /* If this is the trailing part of a word, only load that much into
5713 the register. Otherwise load the whole register. Note that in
5714 the latter case we may pick up unwanted bits. It's not a problem
5715 at the moment, but we may wish to revisit this. */
5716
5717 if (intoffset % BITS_PER_WORD != 0)
5718 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5719 MODE_INT);
5720 else
5721 mode = word_mode;
5722
5723 intoffset /= BITS_PER_UNIT;
5724 do
5725 {
5726 regno = parms->regbase + this_slotno;
5727 reg = gen_rtx_REG (mode, regno);
5728 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5729 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5730
5731 this_slotno += 1;
5732 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5733 mode = word_mode;
5734 parms->nregs += 1;
5735 intslots -= 1;
5736 }
5737 while (intslots > 0);
5738 }
5739
5740 /* A subroutine of function_arg_record_value. Traverse the structure
5741 recursively and assign bits to floating point registers. Track which
5742 bits in between need integer registers; invoke function_arg_record_value_3
5743 to make that happen. */
5744
5745 static void
5746 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5747 struct function_arg_record_value_parms *parms,
5748 bool packed_p)
5749 {
5750 tree field;
5751
5752 if (! packed_p)
5753 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5754 {
5755 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5756 {
5757 packed_p = true;
5758 break;
5759 }
5760 }
5761
5762 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5763 {
5764 if (TREE_CODE (field) == FIELD_DECL)
5765 {
5766 HOST_WIDE_INT bitpos = startbitpos;
5767
5768 if (DECL_SIZE (field) != 0)
5769 {
5770 if (integer_zerop (DECL_SIZE (field)))
5771 continue;
5772
5773 if (host_integerp (bit_position (field), 1))
5774 bitpos += int_bit_position (field);
5775 }
5776
5777 /* ??? FIXME: else assume zero offset. */
5778
5779 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5780 function_arg_record_value_2 (TREE_TYPE (field),
5781 bitpos,
5782 parms,
5783 packed_p);
5784 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5785 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5786 && TARGET_FPU
5787 && parms->named
5788 && ! packed_p)
5789 {
5790 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5791 int regno, nregs, pos;
5792 enum machine_mode mode = DECL_MODE (field);
5793 rtx reg;
5794
5795 function_arg_record_value_3 (bitpos, parms);
5796
5797 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5798 && mode == BLKmode)
5799 {
5800 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5801 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5802 }
5803 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5804 {
5805 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5806 nregs = 2;
5807 }
5808 else
5809 nregs = 1;
5810
5811 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5812 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5813 regno++;
5814 reg = gen_rtx_REG (mode, regno);
5815 pos = bitpos / BITS_PER_UNIT;
5816 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5817 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5818 parms->nregs += 1;
5819 while (--nregs > 0)
5820 {
5821 regno += GET_MODE_SIZE (mode) / 4;
5822 reg = gen_rtx_REG (mode, regno);
5823 pos += GET_MODE_SIZE (mode);
5824 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5825 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5826 parms->nregs += 1;
5827 }
5828 }
5829 else
5830 {
5831 if (parms->intoffset == -1)
5832 parms->intoffset = bitpos;
5833 }
5834 }
5835 }
5836 }
5837
5838 /* Used by function_arg and sparc_function_value_1 to implement the complex
5839 conventions of the 64-bit ABI for passing and returning structures.
5840 Return an expression valid as a return value for both FUNCTION_ARG
5841 and TARGET_FUNCTION_VALUE.
5842
5843 TYPE is the data type of the argument (as a tree).
5844 This is null for libcalls where that information may
5845 not be available.
5846 MODE is the argument's machine mode.
5847 SLOTNO is the index number of the argument's slot in the parameter array.
5848 NAMED is nonzero if this argument is a named parameter
5849 (otherwise it is an extra parameter matching an ellipsis).
5850 REGBASE is the regno of the base register for the parameter array. */
5851
5852 static rtx
5853 function_arg_record_value (const_tree type, enum machine_mode mode,
5854 int slotno, int named, int regbase)
5855 {
5856 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5857 struct function_arg_record_value_parms parms;
5858 unsigned int nregs;
5859
5860 parms.ret = NULL_RTX;
5861 parms.slotno = slotno;
5862 parms.named = named;
5863 parms.regbase = regbase;
5864 parms.stack = 0;
5865
5866 /* Compute how many registers we need. */
5867 parms.nregs = 0;
5868 parms.intoffset = 0;
5869 function_arg_record_value_1 (type, 0, &parms, false);
5870
5871 /* Take into account pending integer fields. */
5872 if (parms.intoffset != -1)
5873 {
5874 unsigned int startbit, endbit;
5875 int intslots, this_slotno;
5876
5877 startbit = parms.intoffset & -BITS_PER_WORD;
5878 endbit = (typesize * BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5879 intslots = (endbit - startbit) / BITS_PER_WORD;
5880 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5881
5882 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5883 {
5884 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5885 /* We need to pass this field on the stack. */
5886 parms.stack = 1;
5887 }
5888
5889 parms.nregs += intslots;
5890 }
5891 nregs = parms.nregs;
5892
5893 /* Allocate the vector and handle some annoying special cases. */
5894 if (nregs == 0)
5895 {
5896 /* ??? Empty structure has no value? Duh? */
5897 if (typesize <= 0)
5898 {
5899 /* Though there's nothing really to store, return a word register
5900 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5901 leads to breakage due to the fact that there are zero bytes to
5902 load. */
5903 return gen_rtx_REG (mode, regbase);
5904 }
5905 else
5906 {
5907 /* ??? C++ has structures with no fields, and yet a size. Give up
5908 for now and pass everything back in integer registers. */
5909 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5910 }
5911 if (nregs + slotno > SPARC_INT_ARG_MAX)
5912 nregs = SPARC_INT_ARG_MAX - slotno;
5913 }
5914 gcc_assert (nregs != 0);
5915
5916 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5917
5918 /* If at least one field must be passed on the stack, generate
5919 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5920 also be passed on the stack. We can't do much better because the
5921 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5922 of structures for which the fields passed exclusively in registers
5923 are not at the beginning of the structure. */
5924 if (parms.stack)
5925 XVECEXP (parms.ret, 0, 0)
5926 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5927
5928 /* Fill in the entries. */
5929 parms.nregs = 0;
5930 parms.intoffset = 0;
5931 function_arg_record_value_2 (type, 0, &parms, false);
5932 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5933
5934 gcc_assert (parms.nregs == nregs);
5935
5936 return parms.ret;
5937 }
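
/* As an illustration, a 16-byte record such as

     struct { double d; long l; };

   passed in slot 0 yields a PARALLEL along the lines of

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the double is promoted to the first FP argument register and
   the long to the second integer argument register (register names
   assume the outgoing, named case). */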
5938
5939 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5940 of the 64-bit ABI for passing and returning unions.
5941 Return an expression valid as a return value for both FUNCTION_ARG
5942 and TARGET_FUNCTION_VALUE.
5943
5944 SIZE is the size in bytes of the union.
5945 MODE is the argument's machine mode.
5946 SLOTNO is the argument's slot index; REGNO is the hard register the union will be passed in. */
5947
5948 static rtx
5949 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5950 int regno)
5951 {
5952 int nwords = ROUND_ADVANCE (size), i;
5953 rtx regs;
5954
5955 /* See comment in previous function for empty structures. */
5956 if (nwords == 0)
5957 return gen_rtx_REG (mode, regno);
5958
5959 if (slotno == SPARC_INT_ARG_MAX - 1)
5960 nwords = 1;
5961
5962 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5963
5964 for (i = 0; i < nwords; i++)
5965 {
5966 /* Unions are passed left-justified. */
5967 XVECEXP (regs, 0, i)
5968 = gen_rtx_EXPR_LIST (VOIDmode,
5969 gen_rtx_REG (word_mode, regno),
5970 GEN_INT (UNITS_PER_WORD * i));
5971 regno++;
5972 }
5973
5974 return regs;
5975 }
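
/* For example, a 16-byte union passed in slot 0 becomes

     (parallel [(expr_list (reg:DI <regno>) (const_int 0))
                (expr_list (reg:DI <regno+1>) (const_int 8))])

   unless it starts in the last slot (SLOTNO == SPARC_INT_ARG_MAX - 1),
   in which case only the first word is passed in a register. */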
5976
5977 /* Used by function_arg and sparc_function_value_1 to implement the conventions
5978 for passing and returning large (BLKmode) vectors.
5979 Return an expression valid as a return value for both FUNCTION_ARG
5980 and TARGET_FUNCTION_VALUE.
5981
5982 SIZE is the size in bytes of the vector (at least 8 bytes).
5983 REGNO is the FP hard register the vector will be passed in. */
5984
5985 static rtx
5986 function_arg_vector_value (int size, int regno)
5987 {
5988 int i, nregs = size / 8;
5989 rtx regs;
5990
5991 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5992
5993 for (i = 0; i < nregs; i++)
5994 {
5995 XVECEXP (regs, 0, i)
5996 = gen_rtx_EXPR_LIST (VOIDmode,
5997 gen_rtx_REG (DImode, regno + 2*i),
5998 GEN_INT (i*8));
5999 }
6000
6001 return regs;
6002 }
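
/* For example, a 16-byte vector starting at FP register REGNO yields

     (parallel [(expr_list (reg:DI <regno>) (const_int 0))
                (expr_list (reg:DI <regno+2>) (const_int 8))])

   i.e. one DImode FP register per 8-byte chunk. */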
6003
6004 /* Determine where to put an argument to a function.
6005 Value is zero to push the argument on the stack,
6006 or a hard register in which to store the argument.
6007
6008 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6009 the preceding args and about the function being called.
6010 MODE is the argument's machine mode.
6011 TYPE is the data type of the argument (as a tree).
6012 This is null for libcalls where that information may
6013 not be available.
6014 NAMED is true if this argument is a named parameter
6015 (otherwise it is an extra parameter matching an ellipsis).
6016 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
6017 TARGET_FUNCTION_INCOMING_ARG. */
6018
6019 static rtx
6020 sparc_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
6021 const_tree type, bool named, bool incoming_p)
6022 {
6023 const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6024
6025 int regbase = (incoming_p
6026 ? SPARC_INCOMING_INT_ARG_FIRST
6027 : SPARC_OUTGOING_INT_ARG_FIRST);
6028 int slotno, regno, padding;
6029 enum mode_class mclass = GET_MODE_CLASS (mode);
6030
6031 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
6032 &regno, &padding);
6033 if (slotno == -1)
6034 return 0;
6035
6036 /* Vector types deserve special treatment because they are polymorphic wrt
6037 their mode, depending upon whether VIS instructions are enabled. */
6038 if (type && TREE_CODE (type) == VECTOR_TYPE)
6039 {
6040 HOST_WIDE_INT size = int_size_in_bytes (type);
6041 gcc_assert ((TARGET_ARCH32 && size <= 8)
6042 || (TARGET_ARCH64 && size <= 16));
6043
6044 if (mode == BLKmode)
6045 return function_arg_vector_value (size,
6046 SPARC_FP_ARG_FIRST + 2*slotno);
6047 else
6048 mclass = MODE_FLOAT;
6049 }
6050
6051 if (TARGET_ARCH32)
6052 return gen_rtx_REG (mode, regno);
6053
6054 /* Structures up to 16 bytes in size are passed in arg slots on the stack
6055 and are promoted to registers if possible. */
6056 if (type && TREE_CODE (type) == RECORD_TYPE)
6057 {
6058 HOST_WIDE_INT size = int_size_in_bytes (type);
6059 gcc_assert (size <= 16);
6060
6061 return function_arg_record_value (type, mode, slotno, named, regbase);
6062 }
6063
6064 /* Unions up to 16 bytes in size are passed in integer registers. */
6065 else if (type && TREE_CODE (type) == UNION_TYPE)
6066 {
6067 HOST_WIDE_INT size = int_size_in_bytes (type);
6068 gcc_assert (size <= 16);
6069
6070 return function_arg_union_value (size, mode, slotno, regno);
6071 }
6072
6073 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
6074 but also have the slot allocated for them.
6075 If no prototype is in scope fp values in register slots get passed
6076 in two places, either fp regs and int regs or fp regs and memory. */
6077 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6078 && SPARC_FP_REG_P (regno))
6079 {
6080 rtx reg = gen_rtx_REG (mode, regno);
6081 if (cum->prototype_p || cum->libcall_p)
6082 {
6083 /* "* 2" because fp reg numbers are recorded in 4 byte
6084 quantities. */
6085 #if 0
6086 /* ??? This will cause the value to be passed in the fp reg and
6087 in the stack. When a prototype exists we want to pass the
6088 value in the reg but reserve space on the stack. That's an
6089 optimization, and is deferred [for a bit]. */
6090 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
6091 return gen_rtx_PARALLEL (mode,
6092 gen_rtvec (2,
6093 gen_rtx_EXPR_LIST (VOIDmode,
6094 NULL_RTX, const0_rtx),
6095 gen_rtx_EXPR_LIST (VOIDmode,
6096 reg, const0_rtx)));
6097 else
6098 #else
6099 /* ??? It seems that passing back a register even when past
6100 the area declared by REG_PARM_STACK_SPACE will allocate
6101 space appropriately, and will not copy the data onto the
6102 stack, exactly as we desire.
6103
6104 This is due to locate_and_pad_parm being called in
6105 expand_call whenever reg_parm_stack_space > 0, which
6106 while beneficial to our example here, would seem to be
6107 in error from what had been intended. Ho hum... -- r~ */
6108 #endif
6109 return reg;
6110 }
6111 else
6112 {
6113 rtx v0, v1;
6114
6115 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
6116 {
6117 int intreg;
6118
6119 /* On incoming, we don't need to know that the value
6120 is passed in %f0 and %i0, and it confuses other parts,
6121 causing needless spillage even in the simplest cases. */
6122 if (incoming_p)
6123 return reg;
6124
6125 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
6126 + (regno - SPARC_FP_ARG_FIRST) / 2);
6127
6128 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6129 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
6130 const0_rtx);
6131 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6132 }
6133 else
6134 {
6135 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
6136 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
6137 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
6138 }
6139 }
6140 }
6141
6142 /* All other aggregate types are passed in an integer register in a mode
6143 corresponding to the size of the type. */
6144 else if (type && AGGREGATE_TYPE_P (type))
6145 {
6146 HOST_WIDE_INT size = int_size_in_bytes (type);
6147 gcc_assert (size <= 16);
6148
6149 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6150 }
6151
6152 return gen_rtx_REG (mode, regno);
6153 }
6154
6155 /* Handle the TARGET_FUNCTION_ARG target hook. */
6156
6157 static rtx
6158 sparc_function_arg (cumulative_args_t cum, enum machine_mode mode,
6159 const_tree type, bool named)
6160 {
6161 return sparc_function_arg_1 (cum, mode, type, named, false);
6162 }
6163
6164 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
6165
6166 static rtx
6167 sparc_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
6168 const_tree type, bool named)
6169 {
6170 return sparc_function_arg_1 (cum, mode, type, named, true);
6171 }
6172
6173 /* For sparc64, objects requiring 16 byte alignment are passed that way. */
6174
6175 static unsigned int
6176 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
6177 {
6178 return ((TARGET_ARCH64
6179 && (GET_MODE_ALIGNMENT (mode) == 128
6180 || (type && TYPE_ALIGN (type) == 128)))
6181 ? 128
6182 : PARM_BOUNDARY);
6183 }
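
/* For instance, on TARGET_ARCH64 a long double argument (TFmode,
   128-bit alignment) gets a 128-bit boundary, while an int argument
   falls back to PARM_BOUNDARY. */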
6184
6185 /* For an arg passed partly in registers and partly in memory,
6186 this is the number of bytes of registers used.
6187 For args passed entirely in registers or entirely in memory, zero.
6188
6189 Any arg that starts in the first 6 regs but won't entirely fit in them
6190 needs partial registers on v8. On v9, structures with integer
6191 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
6192 values that begin in the last fp reg [where "last fp reg" varies with the
6193 mode] will be split between that reg and memory. */
6194
6195 static int
6196 sparc_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
6197 tree type, bool named)
6198 {
6199 int slotno, regno, padding;
6200
6201 /* We pass false for incoming_p here; it doesn't matter. */
6202 slotno = function_arg_slotno (get_cumulative_args (cum), mode, type, named,
6203 false, &regno, &padding);
6204
6205 if (slotno == -1)
6206 return 0;
6207
6208 if (TARGET_ARCH32)
6209 {
6210 if ((slotno + (mode == BLKmode
6211 ? ROUND_ADVANCE (int_size_in_bytes (type))
6212 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
6213 > SPARC_INT_ARG_MAX)
6214 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
6215 }
6216 else
6217 {
6218 /* We are guaranteed by pass_by_reference that the size of the
6219 argument is not greater than 16 bytes, so we only need to return
6220 one word if the argument is partially passed in registers. */
6221
6222 if (type && AGGREGATE_TYPE_P (type))
6223 {
6224 int size = int_size_in_bytes (type);
6225
6226 if (size > UNITS_PER_WORD
6227 && slotno == SPARC_INT_ARG_MAX - 1)
6228 return UNITS_PER_WORD;
6229 }
6230 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
6231 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6232 && ! (TARGET_FPU && named)))
6233 {
6234 /* The complex types are passed as packed types. */
6235 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6236 && slotno == SPARC_INT_ARG_MAX - 1)
6237 return UNITS_PER_WORD;
6238 }
6239 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6240 {
6241 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
6242 > SPARC_FP_ARG_MAX)
6243 return UNITS_PER_WORD;
6244 }
6245 }
6246
6247 return 0;
6248 }
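
/* For example, on TARGET_ARCH32 a DImode argument starting in slot 5
   needs 2 words but only 1 slot remains, so the function returns
   (SPARC_INT_ARG_MAX - 5) * UNITS_PER_WORD, i.e. 4 bytes in %o5 with
   the remainder on the stack. */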
6249
6250 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
6251 Specify whether to pass the argument by reference. */
6252
6253 static bool
6254 sparc_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6255 enum machine_mode mode, const_tree type,
6256 bool named ATTRIBUTE_UNUSED)
6257 {
6258 if (TARGET_ARCH32)
6259 /* Original SPARC 32-bit ABI says that structures and unions,
6260 and quad-precision floats are passed by reference. For Pascal,
6261 also pass arrays by reference. All other base types are passed
6262 in registers.
6263
6264 Extended ABI (as implemented by the Sun compiler) says that all
6265 complex floats are passed by reference. Pass complex integers
6266 in registers up to 8 bytes. More generally, enforce the 2-word
6267 cap for passing arguments in registers.
6268
6269 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6270 integers are passed like floats of the same size, that is in
6271 registers up to 8 bytes. Pass all vector floats by reference
6272 like structure and unions. */
6273 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
6274 || mode == SCmode
6275 /* Catch CDImode, TFmode, DCmode and TCmode. */
6276 || GET_MODE_SIZE (mode) > 8
6277 || (type
6278 && TREE_CODE (type) == VECTOR_TYPE
6279 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6280 else
6281 /* Original SPARC 64-bit ABI says that structures and unions
6282 smaller than 16 bytes are passed in registers, as well as
6283 all other base types.
6284
6285 Extended ABI (as implemented by the Sun compiler) says that
6286 complex floats are passed in registers up to 16 bytes. Pass
6287 all complex integers in registers up to 16 bytes. More generally,
6288 enforce the 2-word cap for passing arguments in registers.
6289
6290 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6291 integers are passed like floats of the same size, that is in
6292 registers (up to 16 bytes). Pass all vector floats like structure
6293 and unions. */
6294 return ((type
6295 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
6296 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
6297 /* Catch CTImode and TCmode. */
6298 || GET_MODE_SIZE (mode) > 16);
6299 }
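
/* For example, on TARGET_ARCH32 structures of any size, long doubles
   (TFmode) and _Complex floats (SCmode) all go by reference, whereas
   on TARGET_ARCH64 a long double is passed by value and only
   aggregates and vectors larger than 16 bytes, plus modes wider than
   16 bytes such as TCmode, go by reference. */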
6300
6301 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
6302 Update the data in CUM to advance over an argument
6303 of mode MODE and data type TYPE.
6304 TYPE is null for libcalls where that information may not be available. */
6305
6306 static void
6307 sparc_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
6308 const_tree type, bool named)
6309 {
6310 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
6311 int regno, padding;
6312
6313 /* We pass false for incoming_p here; it doesn't matter. */
6314 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
6315
6316 /* If argument requires leading padding, add it. */
6317 cum->words += padding;
6318
6319 if (TARGET_ARCH32)
6320 {
6321 cum->words += (mode != BLKmode
6322 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6323 : ROUND_ADVANCE (int_size_in_bytes (type)));
6324 }
6325 else
6326 {
6327 if (type && AGGREGATE_TYPE_P (type))
6328 {
6329 int size = int_size_in_bytes (type);
6330
6331 if (size <= 8)
6332 ++cum->words;
6333 else if (size <= 16)
6334 cum->words += 2;
6335 else /* passed by reference */
6336 ++cum->words;
6337 }
6338 else
6339 {
6340 cum->words += (mode != BLKmode
6341 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
6342 : ROUND_ADVANCE (int_size_in_bytes (type)));
6343 }
6344 }
6345 }
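
/* For example, on TARGET_ARCH64 a 12-byte struct advances CUM->words
   by 2, while a 24-byte struct, being passed by reference, advances
   it by a single word for the pointer. */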
6346
6347 /* Handle the FUNCTION_ARG_PADDING macro.
6348 For the 64-bit ABI, structs are always stored left-justified in their
6349 argument slot. */
6350
6351 enum direction
6352 function_arg_padding (enum machine_mode mode, const_tree type)
6353 {
6354 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
6355 return upward;
6356
6357 /* Fall back to the default. */
6358 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
6359 }
6360
6361 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
6362 Specify whether to return the return value in memory. */
6363
6364 static bool
6365 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6366 {
6367 if (TARGET_ARCH32)
6368 /* Original SPARC 32-bit ABI says that structures and unions,
6369 and quad-precision floats are returned in memory. All other
6370 base types are returned in registers.
6371
6372 Extended ABI (as implemented by the Sun compiler) says that
6373 all complex floats are returned in registers (8 FP registers
6374 at most for '_Complex long double'). Return all complex integers
6375 in registers (4 at most for '_Complex long long').
6376
6377 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6378 integers are returned like floats of the same size, that is in
6379 registers up to 8 bytes and in memory otherwise. Return all
6380 vector floats in memory like structure and unions; note that
6381 they always have BLKmode like the latter. */
6382 return (TYPE_MODE (type) == BLKmode
6383 || TYPE_MODE (type) == TFmode
6384 || (TREE_CODE (type) == VECTOR_TYPE
6385 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6386 else
6387 /* Original SPARC 64-bit ABI says that structures and unions
6388 smaller than 32 bytes are returned in registers, as well as
6389 all other base types.
6390
6391 Extended ABI (as implemented by the Sun compiler) says that all
6392 complex floats are returned in registers (8 FP registers at most
6393 for '_Complex long double'). Return all complex integers in
6394 registers (4 at most for '_Complex TItype').
6395
6396 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6397 integers are returned like floats of the same size, that is in
6398 registers. Return all vector floats like structure and unions;
6399 note that they always have BLKmode like the latter. */
6400 return (TYPE_MODE (type) == BLKmode
6401 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6402 }
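
/* For example, on TARGET_ARCH32 a long double (TFmode) is returned
   in memory while a _Complex double (DCmode) stays in registers; on
   TARGET_ARCH64 a 24-byte structure is returned in registers but a
   40-byte one in memory. */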
6403
6404 /* Handle the TARGET_STRUCT_VALUE target hook.
6405 Return where to find the structure return value address. */
6406
6407 static rtx
6408 sparc_struct_value_rtx (tree fndecl, int incoming)
6409 {
6410 if (TARGET_ARCH64)
6411 return 0;
6412 else
6413 {
6414 rtx mem;
6415
6416 if (incoming)
6417 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6418 STRUCT_VALUE_OFFSET));
6419 else
6420 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6421 STRUCT_VALUE_OFFSET));
6422
6423 /* Only follow the SPARC ABI for fixed-size structure returns.
6424 Variable-size structure returns are handled per the normal
6425 procedures in GCC. This is enabled by -mstd-struct-return. */
6426 if (incoming == 2
6427 && sparc_std_struct_return
6428 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6429 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6430 {
6431 /* We must check and adjust the return address, as it is
6432 optional whether the return object is really provided. */
6434 rtx ret_reg = gen_rtx_REG (Pmode, 31);
6435 rtx scratch = gen_reg_rtx (SImode);
6436 rtx endlab = gen_label_rtx ();
6437
6438 /* Calculate the return object size. */
6439 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6440 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6441 /* Construct a temporary return value. */
6442 rtx temp_val
6443 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6444
6445 /* Implement SPARC 32-bit psABI callee return struct checking:
6446
6447 Fetch the instruction we will return to and see if
6448 it's an unimp instruction (the most significant 10 bits
6449 will be zero). */
6450 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6451 plus_constant (ret_reg, 8)));
6452 /* Assume the size is valid and pre-adjust. */
6453 emit_insn (gen_add3_insn (ret_reg, ret_reg, GEN_INT (4)));
6454 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6455 0, endlab);
6456 emit_insn (gen_sub3_insn (ret_reg, ret_reg, GEN_INT (4)));
6457 /* Write the address of the memory pointed to by temp_val into
6458 the memory pointed to by mem. */
6459 emit_move_insn (mem, XEXP (temp_val, 0));
6460 emit_label (endlab);
6461 }
6462
6463 return mem;
6464 }
6465 }
6466
6467 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6468 For v9, function return values are subject to the same rules as arguments,
6469 except that up to 32 bytes may be returned in registers. */
6470
6471 static rtx
6472 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6473 bool outgoing)
6474 {
6475 /* Beware that the two values are swapped here wrt function_arg. */
6476 int regbase = (outgoing
6477 ? SPARC_INCOMING_INT_ARG_FIRST
6478 : SPARC_OUTGOING_INT_ARG_FIRST);
6479 enum mode_class mclass = GET_MODE_CLASS (mode);
6480 int regno;
6481
6482 /* Vector types deserve special treatment because they are polymorphic wrt
6483 their mode, depending upon whether VIS instructions are enabled. */
6484 if (type && TREE_CODE (type) == VECTOR_TYPE)
6485 {
6486 HOST_WIDE_INT size = int_size_in_bytes (type);
6487 gcc_assert ((TARGET_ARCH32 && size <= 8)
6488 || (TARGET_ARCH64 && size <= 32));
6489
6490 if (mode == BLKmode)
6491 return function_arg_vector_value (size,
6492 SPARC_FP_ARG_FIRST);
6493 else
6494 mclass = MODE_FLOAT;
6495 }
6496
6497 if (TARGET_ARCH64 && type)
6498 {
6499 /* Structures up to 32 bytes in size are returned in registers. */
6500 if (TREE_CODE (type) == RECORD_TYPE)
6501 {
6502 HOST_WIDE_INT size = int_size_in_bytes (type);
6503 gcc_assert (size <= 32);
6504
6505 return function_arg_record_value (type, mode, 0, 1, regbase);
6506 }
6507
6508 /* Unions up to 32 bytes in size are returned in integer registers. */
6509 else if (TREE_CODE (type) == UNION_TYPE)
6510 {
6511 HOST_WIDE_INT size = int_size_in_bytes (type);
6512 gcc_assert (size <= 32);
6513
6514 return function_arg_union_value (size, mode, 0, regbase);
6515 }
6516
6517 /* Objects that require it are returned in FP registers. */
6518 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6519 ;
6520
6521 /* All other aggregate types are returned in an integer register in a
6522 mode corresponding to the size of the type. */
6523 else if (AGGREGATE_TYPE_P (type))
6524 {
6525 /* All other aggregate types are passed in an integer register
6526 in a mode corresponding to the size of the type. */
6527 HOST_WIDE_INT size = int_size_in_bytes (type);
6528 gcc_assert (size <= 32);
6529
6530 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6531
6532 /* ??? We probably should have made the same ABI change in
6533 3.4.0 as the one we made for unions. The latter was
6534 required by the SCD though, while the former is not
6535 specified, so we favored compatibility and efficiency.
6536
6537 Now we're stuck for aggregates larger than 16 bytes,
6538 because OImode vanished in the meantime. Let's not
6539 try to be unduly clever, and simply follow the ABI
6540 for unions in that case. */
6541 if (mode == BLKmode)
6542 return function_arg_union_value (size, mode, 0, regbase);
6543 else
6544 mclass = MODE_INT;
6545 }
6546
6547 /* We should only have pointer and integer types at this point. This
6548 must match sparc_promote_function_mode. */
6549 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6550 mode = word_mode;
6551 }
6552
6553 /* We should only have pointer and integer types at this point. This must
6554 match sparc_promote_function_mode. */
6555 else if (TARGET_ARCH32
6556 && mclass == MODE_INT
6557 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6558 mode = word_mode;
6559
6560 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6561 regno = SPARC_FP_ARG_FIRST;
6562 else
6563 regno = regbase;
6564
6565 return gen_rtx_REG (mode, regno);
6566 }
6567
6568 /* Handle TARGET_FUNCTION_VALUE.
6569 On the SPARC, the value is found in the first "output" register, but the
6570 called function leaves it in the first "input" register. */
6571
6572 static rtx
6573 sparc_function_value (const_tree valtype,
6574 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6575 bool outgoing)
6576 {
6577 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6578 }
6579
6580 /* Handle TARGET_LIBCALL_VALUE. */
6581
6582 static rtx
6583 sparc_libcall_value (enum machine_mode mode,
6584 const_rtx fun ATTRIBUTE_UNUSED)
6585 {
6586 return sparc_function_value_1 (NULL_TREE, mode, false);
6587 }
6588
6589 /* Handle FUNCTION_VALUE_REGNO_P.
6590 On the SPARC, the first "output" reg is used for integer values, and the
6591 first floating point register is used for floating point values. */
6592
6593 static bool
6594 sparc_function_value_regno_p (const unsigned int regno)
6595 {
6596 return (regno == 8 || regno == 32);
6597 }
6598
6599 /* Do what is necessary for `va_start'. We look at the current function
6600 to determine if stdarg or varargs is used and return the address of
6601 the first unnamed parameter. */
6602
6603 static rtx
6604 sparc_builtin_saveregs (void)
6605 {
6606 int first_reg = crtl->args.info.words;
6607 rtx address;
6608 int regno;
6609
6610 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6611 emit_move_insn (gen_rtx_MEM (word_mode,
6612 gen_rtx_PLUS (Pmode,
6613 frame_pointer_rtx,
6614 GEN_INT (FIRST_PARM_OFFSET (0)
6615 + (UNITS_PER_WORD
6616 * regno)))),
6617 gen_rtx_REG (word_mode,
6618 SPARC_INCOMING_INT_ARG_FIRST + regno));
6619
6620 address = gen_rtx_PLUS (Pmode,
6621 frame_pointer_rtx,
6622 GEN_INT (FIRST_PARM_OFFSET (0)
6623 + UNITS_PER_WORD * first_reg));
6624
6625 return address;
6626 }
6627
6628 /* Implement `va_start' for stdarg. */
6629
6630 static void
6631 sparc_va_start (tree valist, rtx nextarg)
6632 {
6633 nextarg = expand_builtin_saveregs ();
6634 std_expand_builtin_va_start (valist, nextarg);
6635 }
6636
6637 /* Implement `va_arg' for stdarg. */
6638
6639 static tree
6640 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6641 gimple_seq *post_p)
6642 {
6643 HOST_WIDE_INT size, rsize, align;
6644 tree addr, incr;
6645 bool indirect;
6646 tree ptrtype = build_pointer_type (type);
6647
6648 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6649 {
6650 indirect = true;
6651 size = rsize = UNITS_PER_WORD;
6652 align = 0;
6653 }
6654 else
6655 {
6656 indirect = false;
6657 size = int_size_in_bytes (type);
6658 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6659 align = 0;
6660
6661 if (TARGET_ARCH64)
6662 {
6663 /* For SPARC64, objects requiring 16-byte alignment get it. */
6664 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6665 align = 2 * UNITS_PER_WORD;
6666
6667 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6668 are left-justified in their slots. */
6669 if (AGGREGATE_TYPE_P (type))
6670 {
6671 if (size == 0)
6672 size = rsize = UNITS_PER_WORD;
6673 else
6674 size = rsize;
6675 }
6676 }
6677 }
6678
6679 incr = valist;
6680 if (align)
6681 {
6682 incr = fold_build_pointer_plus_hwi (incr, align - 1);
6683 incr = fold_convert (sizetype, incr);
6684 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6685 size_int (-align));
6686 incr = fold_convert (ptr_type_node, incr);
6687 }
6688
6689 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6690 addr = incr;
6691
6692 if (BYTES_BIG_ENDIAN && size < rsize)
6693 addr = fold_build_pointer_plus_hwi (incr, rsize - size);
6694
6695 if (indirect)
6696 {
6697 addr = fold_convert (build_pointer_type (ptrtype), addr);
6698 addr = build_va_arg_indirect_ref (addr);
6699 }
6700
6701 /* If the address isn't aligned properly for the type, we need a temporary.
6702 FIXME: This is inefficient, usually we can do this in registers. */
6703 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6704 {
6705 tree tmp = create_tmp_var (type, "va_arg_tmp");
6706 tree dest_addr = build_fold_addr_expr (tmp);
6707 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6708 3, dest_addr, addr, size_int (rsize));
6709 TREE_ADDRESSABLE (tmp) = 1;
6710 gimplify_and_add (copy, pre_p);
6711 addr = dest_addr;
6712 }
6713
6714 else
6715 addr = fold_convert (ptrtype, addr);
6716
6717 incr = fold_build_pointer_plus_hwi (incr, rsize);
6718 gimplify_assign (valist, incr, post_p);
6719
6720 return build_va_arg_indirect_ref (addr);
6721 }
6722 \f
6723 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6724 Specify whether the vector mode is supported by the hardware. */
6725
6726 static bool
6727 sparc_vector_mode_supported_p (enum machine_mode mode)
6728 {
6729 return TARGET_VIS && VECTOR_MODE_P (mode);
6730 }
6731 \f
6732 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6733
6734 static enum machine_mode
6735 sparc_preferred_simd_mode (enum machine_mode mode)
6736 {
6737 if (TARGET_VIS)
6738 switch (mode)
6739 {
6740 case SImode:
6741 return V2SImode;
6742 case HImode:
6743 return V4HImode;
6744 case QImode:
6745 return V8QImode;
6746
6747 default:;
6748 }
6749
6750 return word_mode;
6751 }
6752 \f
6753 /* Return the string to output an unconditional branch to LABEL, which is
6754 the operand number of the label.
6755
6756 DEST is the destination insn (i.e. the label), INSN is the source. */
6757
6758 const char *
6759 output_ubranch (rtx dest, int label, rtx insn)
6760 {
6761 static char string[64];
6762 bool v9_form = false;
6763 char *p;
6764
6765 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6766 {
6767 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6768 - INSN_ADDRESSES (INSN_UID (insn)));
6769 /* Leave some instructions for "slop". */
6770 if (delta >= -260000 && delta < 260000)
6771 v9_form = true;
6772 }
6773
6774 if (v9_form)
6775 strcpy (string, "ba%*,pt\t%%xcc, ");
6776 else
6777 strcpy (string, "b%*\t");
6778
6779 p = strchr (string, '\0');
6780 *p++ = '%';
6781 *p++ = 'l';
6782 *p++ = '0' + label;
6783 *p++ = '%';
6784 *p++ = '(';
6785 *p = '\0';
6786
6787 return string;
6788 }
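
/* For a sufficiently close label on V9, the code above thus produces
   something like "ba,pt %xcc, .LC30" once the %*, %l0 and %(
   punctuation has been expanded by the output machinery (annul flag,
   label operand and delay-slot handling, respectively). */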
6789
6790 /* Return the string to output a conditional branch to LABEL, which is
6791 the operand number of the label. OP is the conditional expression.
6792 XEXP (OP, 0) is assumed to be a condition code register (integer or
6793 floating point) and its mode specifies what kind of comparison we made.
6794
6795 DEST is the destination insn (i.e. the label), INSN is the source.
6796
6797 REVERSED is nonzero if we should reverse the sense of the comparison.
6798
6799 ANNUL is nonzero if we should generate an annulling branch. */
6800
6801 const char *
6802 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6803 rtx insn)
6804 {
6805 static char string[64];
6806 enum rtx_code code = GET_CODE (op);
6807 rtx cc_reg = XEXP (op, 0);
6808 enum machine_mode mode = GET_MODE (cc_reg);
6809 const char *labelno, *branch;
6810 int spaces = 8, far;
6811 char *p;
6812
6813 /* v9 branches are limited to +-1MB. If it is too far away,
6814 change
6815
6816 bne,pt %xcc, .LC30
6817
6818 to
6819
6820 be,pn %xcc, .+12
6821 nop
6822 ba .LC30
6823
6824 and
6825
6826 fbne,a,pn %fcc2, .LC29
6827
6828 to
6829
6830 fbe,pt %fcc2, .+16
6831 nop
6832 ba .LC29 */
6833
6834 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6835 if (reversed ^ far)
6836 {
6837 /* Reversal of FP compares takes care -- an ordered compare
6838 becomes an unordered compare and vice versa. */
6839 if (mode == CCFPmode || mode == CCFPEmode)
6840 code = reverse_condition_maybe_unordered (code);
6841 else
6842 code = reverse_condition (code);
6843 }
6844
6845 /* Start by writing the branch condition. */
6846 if (mode == CCFPmode || mode == CCFPEmode)
6847 {
6848 switch (code)
6849 {
6850 case NE:
6851 branch = "fbne";
6852 break;
6853 case EQ:
6854 branch = "fbe";
6855 break;
6856 case GE:
6857 branch = "fbge";
6858 break;
6859 case GT:
6860 branch = "fbg";
6861 break;
6862 case LE:
6863 branch = "fble";
6864 break;
6865 case LT:
6866 branch = "fbl";
6867 break;
6868 case UNORDERED:
6869 branch = "fbu";
6870 break;
6871 case ORDERED:
6872 branch = "fbo";
6873 break;
6874 case UNGT:
6875 branch = "fbug";
6876 break;
6877 case UNLT:
6878 branch = "fbul";
6879 break;
6880 case UNEQ:
6881 branch = "fbue";
6882 break;
6883 case UNGE:
6884 branch = "fbuge";
6885 break;
6886 case UNLE:
6887 branch = "fbule";
6888 break;
6889 case LTGT:
6890 branch = "fblg";
6891 break;
6892
6893 default:
6894 gcc_unreachable ();
6895 }
6896
6897 /* ??? !v9: FP branches cannot be preceded by another floating point
6898 insn. Because there is currently no concept of pre-delay slots,
6899 we can fix this only by always emitting a nop before a floating
6900 point branch. */
6901
6902 string[0] = '\0';
6903 if (! TARGET_V9)
6904 strcpy (string, "nop\n\t");
6905 strcat (string, branch);
6906 }
6907 else
6908 {
6909 switch (code)
6910 {
6911 case NE:
6912 branch = "bne";
6913 break;
6914 case EQ:
6915 branch = "be";
6916 break;
6917 case GE:
6918 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6919 branch = "bpos";
6920 else
6921 branch = "bge";
6922 break;
6923 case GT:
6924 branch = "bg";
6925 break;
6926 case LE:
6927 branch = "ble";
6928 break;
6929 case LT:
6930 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6931 branch = "bneg";
6932 else
6933 branch = "bl";
6934 break;
6935 case GEU:
6936 branch = "bgeu";
6937 break;
6938 case GTU:
6939 branch = "bgu";
6940 break;
6941 case LEU:
6942 branch = "bleu";
6943 break;
6944 case LTU:
6945 branch = "blu";
6946 break;
6947
6948 default:
6949 gcc_unreachable ();
6950 }
6951 strcpy (string, branch);
6952 }
6953 spaces -= strlen (branch);
6954 p = strchr (string, '\0');
6955
6956 /* Now add the annulling, the label, and a possible noop. */
6957 if (annul && ! far)
6958 {
6959 strcpy (p, ",a");
6960 p += 2;
6961 spaces -= 2;
6962 }
6963
6964 if (TARGET_V9)
6965 {
6966 rtx note;
6967 int v8 = 0;
6968
6969 if (! far && insn && INSN_ADDRESSES_SET_P ())
6970 {
6971 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6972 - INSN_ADDRESSES (INSN_UID (insn)));
6973 /* Leave some instructions for "slop". */
6974 if (delta < -260000 || delta >= 260000)
6975 v8 = 1;
6976 }
6977
6978 if (mode == CCFPmode || mode == CCFPEmode)
6979 {
6980 static char v9_fcc_labelno[] = "%%fccX, ";
6981 /* Set the char indicating the number of the fcc reg to use. */
6982 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6983 labelno = v9_fcc_labelno;
6984 if (v8)
6985 {
6986 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6987 labelno = "";
6988 }
6989 }
6990 else if (mode == CCXmode || mode == CCX_NOOVmode)
6991 {
6992 labelno = "%%xcc, ";
6993 gcc_assert (! v8);
6994 }
6995 else
6996 {
6997 labelno = "%%icc, ";
6998 if (v8)
6999 labelno = "";
7000 }
7001
7002 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7003 {
7004 strcpy (p,
7005 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7006 ? ",pt" : ",pn");
7007 p += 3;
7008 spaces -= 3;
7009 }
7010 }
7011 else
7012 labelno = "";
7013
7014 if (spaces > 0)
7015 *p++ = '\t';
7016 else
7017 *p++ = ' ';
7018 strcpy (p, labelno);
7019 p = strchr (p, '\0');
7020 if (far)
7021 {
7022 strcpy (p, ".+12\n\t nop\n\tb\t");
7023 /* Skip the next insn if requested or
7024 if we know that it will be a nop. */
7025 if (annul || ! final_sequence)
7026 p[3] = '6';
7027 p += 14;
7028 }
7029 *p++ = '%';
7030 *p++ = 'l';
7031 *p++ = label + '0';
7032 *p++ = '%';
7033 *p++ = '#';
7034 *p = '\0';
7035
7036 return string;
7037 }
7038
7039 /* Emit a library call comparison between floating point X and Y.
7040 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
7041 Return the new operator to be used in the comparison sequence.
7042
7043 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
7044 values as arguments instead of the TFmode registers themselves;
7045 that's why we cannot call emit_float_lib_cmp. */
7046
7047 rtx
7048 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
7049 {
7050 const char *qpfunc;
7051 rtx slot0, slot1, result, tem, tem2, libfunc;
7052 enum machine_mode mode;
7053 enum rtx_code new_comparison;
7054
7055 switch (comparison)
7056 {
7057 case EQ:
7058 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
7059 break;
7060
7061 case NE:
7062 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
7063 break;
7064
7065 case GT:
7066 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
7067 break;
7068
7069 case GE:
7070 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
7071 break;
7072
7073 case LT:
7074 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
7075 break;
7076
7077 case LE:
7078 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
7079 break;
7080
7081 case ORDERED:
7082 case UNORDERED:
7083 case UNGT:
7084 case UNLT:
7085 case UNEQ:
7086 case UNGE:
7087 case UNLE:
7088 case LTGT:
7089 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
7090 break;
7091
7092 default:
7093 gcc_unreachable ();
7094 }
7095
7096 if (TARGET_ARCH64)
7097 {
7098 if (MEM_P (x))
7099 slot0 = x;
7100 else
7101 {
7102 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7103 emit_move_insn (slot0, x);
7104 }
7105
7106 if (MEM_P (y))
7107 slot1 = y;
7108 else
7109 {
7110 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
7111 emit_move_insn (slot1, y);
7112 }
7113
7114 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7115 emit_library_call (libfunc, LCT_NORMAL,
7116 DImode, 2,
7117 XEXP (slot0, 0), Pmode,
7118 XEXP (slot1, 0), Pmode);
7119 mode = DImode;
7120 }
7121 else
7122 {
7123 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
7124 emit_library_call (libfunc, LCT_NORMAL,
7125 SImode, 2,
7126 x, TFmode, y, TFmode);
7127 mode = SImode;
7128 }
7129
7131 /* Immediately move the result of the libcall into a pseudo
7132 register so reload doesn't clobber the value if it needs
7133 the return register for a spill reg. */
7134 result = gen_reg_rtx (mode);
7135 emit_move_insn (result, hard_libcall_value (mode, libfunc));
7136
7137 switch (comparison)
7138 {
7139 default:
7140 return gen_rtx_NE (VOIDmode, result, const0_rtx);
7141 case ORDERED:
7142 case UNORDERED:
7143 new_comparison = (comparison == UNORDERED ? EQ : NE);
7144 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT (3));
7145 case UNGT:
7146 case UNGE:
7147 new_comparison = (comparison == UNGT ? GT : NE);
7148 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
7149 case UNLE:
7150 return gen_rtx_NE (VOIDmode, result, const2_rtx);
7151 case UNLT:
7152 tem = gen_reg_rtx (mode);
7153 if (TARGET_ARCH32)
7154 emit_insn (gen_andsi3 (tem, result, const1_rtx));
7155 else
7156 emit_insn (gen_anddi3 (tem, result, const1_rtx));
7157 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
7158 case UNEQ:
7159 case LTGT:
7160 tem = gen_reg_rtx (mode);
7161 if (TARGET_ARCH32)
7162 emit_insn (gen_addsi3 (tem, result, const1_rtx));
7163 else
7164 emit_insn (gen_adddi3 (tem, result, const1_rtx));
7165 tem2 = gen_reg_rtx (mode);
7166 if (TARGET_ARCH32)
7167 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
7168 else
7169 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
7170 new_comparison = (comparison == UNEQ ? EQ : NE);
7171 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
7172 }
7173
7174 gcc_unreachable ();
7175 }
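
/* The unordered cases above rely on the _Q_cmp/_Qp_cmp return
   convention: 0 for equal, 1 for less, 2 for greater and 3 for
   unordered. For instance, UNLT tests (result & 1) != 0, which
   covers exactly the "less" and "unordered" outcomes. */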
7176
7177 /* Generate an unsigned DImode to FP conversion. This is the same code
7178 optabs would emit if we didn't have TFmode patterns. */
7179
7180 void
7181 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
7182 {
7183 rtx neglab, donelab, i0, i1, f0, in, out;
7184
7185 out = operands[0];
7186 in = force_reg (DImode, operands[1]);
7187 neglab = gen_label_rtx ();
7188 donelab = gen_label_rtx ();
7189 i0 = gen_reg_rtx (DImode);
7190 i1 = gen_reg_rtx (DImode);
7191 f0 = gen_reg_rtx (mode);
7192
7193 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
7194
7195 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
7196 emit_jump_insn (gen_jump (donelab));
7197 emit_barrier ();
7198
7199 emit_label (neglab);
7200
7201 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
7202 emit_insn (gen_anddi3 (i1, in, const1_rtx));
7203 emit_insn (gen_iordi3 (i0, i0, i1));
7204 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
7205 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
7206
7207 emit_label (donelab);
7208 }
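
/* In C terms, the expansion above computes roughly the following,
   with IN an unsigned 64-bit value and FP standing for the target
   floating-point mode:

     if ((long long) in >= 0)
       out = (FP) in;
     else
       out = 2 * (FP) (long long) ((in >> 1) | (in & 1));

   ORing the low bit back in makes the halving round to odd, so the
   final doubling does not lose precision. */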
7209
7210 /* Generate an FP to unsigned DImode conversion. This is the same code
7211 optabs would emit if we didn't have TFmode patterns. */
7212
7213 void
7214 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
7215 {
7216 rtx neglab, donelab, i0, i1, f0, in, out, limit;
7217
7218 out = operands[0];
7219 in = force_reg (mode, operands[1]);
7220 neglab = gen_label_rtx ();
7221 donelab = gen_label_rtx ();
7222 i0 = gen_reg_rtx (DImode);
7223 i1 = gen_reg_rtx (DImode);
7224 limit = gen_reg_rtx (mode);
7225 f0 = gen_reg_rtx (mode);
7226
7227 emit_move_insn (limit,
7228 CONST_DOUBLE_FROM_REAL_VALUE (
7229 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
7230 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
7231
7232 emit_insn (gen_rtx_SET (VOIDmode,
7233 out,
7234 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
7235 emit_jump_insn (gen_jump (donelab));
7236 emit_barrier ();
7237
7238 emit_label (neglab);
7239
7240 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
7241 emit_insn (gen_rtx_SET (VOIDmode,
7242 i0,
7243 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
7244 emit_insn (gen_movdi (i1, const1_rtx));
7245 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
7246 emit_insn (gen_xordi3 (out, i0, i1));
7247
7248 emit_label (donelab);
7249 }
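
/* The C equivalent of the expansion above is roughly:

     if (in < 9223372036854775808.0)
       out = (long long) in;
     else
       out = (long long) (in - 9223372036854775808.0) ^ (1ULL << 63);

   i.e. values too large for a signed conversion are rebased below
   2^63 first and the sign bit is patched back in afterwards. */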
7250
7251 /* Return the string to output a conditional branch to LABEL, testing
7252 register REG. LABEL is the operand number of the label; REG is the
7253 operand number of the reg. OP is the conditional expression. The mode
7254 of REG says what kind of comparison we made.
7255
7256 DEST is the destination insn (i.e. the label), INSN is the source.
7257
7258 REVERSED is nonzero if we should reverse the sense of the comparison.
7259
7260 ANNUL is nonzero if we should generate an annulling branch. */
7261
7262 const char *
7263 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
7264 int annul, rtx insn)
7265 {
7266 static char string[64];
7267 enum rtx_code code = GET_CODE (op);
7268 enum machine_mode mode = GET_MODE (XEXP (op, 0));
7269 rtx note;
7270 int far;
7271 char *p;
7272
7273 /* Branches on registers are limited to +-128KB. If it is too far away,
7274 change
7275
7276 brnz,pt %g1, .LC30
7277
7278 to
7279
7280 brz,pn %g1, .+12
7281 nop
7282 ba,pt %xcc, .LC30
7283
7284 and
7285
7286 brgez,a,pn %o1, .LC29
7287
7288 to
7289
7290 brlz,pt %o1, .+16
7291 nop
7292 ba,pt %xcc, .LC29 */
7293
7294 far = get_attr_length (insn) >= 3;
7295
7296 /* These are not floating-point comparisons, so we can just reverse the code. */
7297 if (reversed ^ far)
7298 code = reverse_condition (code);
7299
7300 /* Only 64 bit versions of these instructions exist. */
7301 gcc_assert (mode == DImode);
7302
7303 /* Start by writing the branch condition. */
7304
7305 switch (code)
7306 {
7307 case NE:
7308 strcpy (string, "brnz");
7309 break;
7310
7311 case EQ:
7312 strcpy (string, "brz");
7313 break;
7314
7315 case GE:
7316 strcpy (string, "brgez");
7317 break;
7318
7319 case LT:
7320 strcpy (string, "brlz");
7321 break;
7322
7323 case LE:
7324 strcpy (string, "brlez");
7325 break;
7326
7327 case GT:
7328 strcpy (string, "brgz");
7329 break;
7330
7331 default:
7332 gcc_unreachable ();
7333 }
7334
7335 p = strchr (string, '\0');
7336
7337 /* Now add the annulling, reg, label, and nop. */
7338 if (annul && ! far)
7339 {
7340 strcpy (p, ",a");
7341 p += 2;
7342 }
7343
7344 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
7345 {
7346 strcpy (p,
7347 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
7348 ? ",pt" : ",pn");
7349 p += 3;
7350 }
7351
7352 *p = p < string + 8 ? '\t' : ' ';
7353 p++;
7354 *p++ = '%';
7355 *p++ = '0' + reg;
7356 *p++ = ',';
7357 *p++ = ' ';
7358 if (far)
7359 {
7360 int veryfar = 1, delta;
7361
7362 if (INSN_ADDRESSES_SET_P ())
7363 {
7364 delta = (INSN_ADDRESSES (INSN_UID (dest))
7365 - INSN_ADDRESSES (INSN_UID (insn)));
7366 /* Leave some instructions for "slop". */
7367 if (delta >= -260000 && delta < 260000)
7368 veryfar = 0;
7369 }
7370
7371 strcpy (p, ".+12\n\t nop\n\t");
7372 /* Skip the next insn if requested or
7373 if we know that it will be a nop. */
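/* I.e. rewrite the ".+12" just stored above into ".+16".  */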
7374 if (annul || ! final_sequence)
7375 p[3] = '6';
7376 p += 12;
7377 if (veryfar)
7378 {
7379 strcpy (p, "b\t");
7380 p += 2;
7381 }
7382 else
7383 {
7384 strcpy (p, "ba,pt\t%%xcc, ");
7385 p += 13;
7386 }
7387 }
7388 *p++ = '%';
7389 *p++ = 'l';
7390 *p++ = '0' + label;
7391 *p++ = '%';
7392 *p++ = '#';
7393 *p = '\0';
7394
7395 return string;
7396 }
7397
7398 /* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
7399 Such instructions cannot be used in the delay slot of a return insn on V9.
7400 If TEST is 0, also rename all %i[0-7] registers to their %o[0-7] counterparts.
7401 */
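
/* For example (a sketch): when an insn is moved into the delay slot of
   the V9 "return", a use of (reg:SI 24 %i0) must be rewritten to
   (reg:SI 8 %o0) via OUTGOING_REGNO, since "return" restores the
   register window and the delay-slot insn executes in the caller's
   window.  */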
7402
7403 static int
7404 epilogue_renumber (register rtx *where, int test)
7405 {
7406 register const char *fmt;
7407 register int i;
7408 register enum rtx_code code;
7409
7410 if (*where == 0)
7411 return 0;
7412
7413 code = GET_CODE (*where);
7414
7415 switch (code)
7416 {
7417 case REG:
7418 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7419 return 1;
7420 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7421 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
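/* Fall through: any other register (%g, %f, or a just-renamed
   %i -> %o) is safe here.  */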
7422 case SCRATCH:
7423 case CC0:
7424 case PC:
7425 case CONST_INT:
7426 case CONST_DOUBLE:
7427 return 0;
7428
7429 /* Do not replace the frame pointer with the stack pointer because
7430 it can cause the delayed instruction to load from below the stack pointer.
7431 This occurs when instructions like:
7432
7433 (set (reg/i:SI 24 %i0)
7434 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7435 (const_int -20 [0xffffffec])) 0))
7436
7437 are in the return delay slot. */
7438 case PLUS:
7439 if (GET_CODE (XEXP (*where, 0)) == REG
7440 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7441 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7442 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7443 return 1;
7444 break;
7445
7446 case MEM:
7447 if (SPARC_STACK_BIAS
7448 && GET_CODE (XEXP (*where, 0)) == REG
7449 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7450 return 1;
7451 break;
7452
7453 default:
7454 break;
7455 }
7456
7457 fmt = GET_RTX_FORMAT (code);
7458
7459 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7460 {
7461 if (fmt[i] == 'E')
7462 {
7463 register int j;
7464 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7465 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7466 return 1;
7467 }
7468 else if (fmt[i] == 'e'
7469 && epilogue_renumber (&(XEXP (*where, i)), test))
7470 return 1;
7471 }
7472 return 0;
7473 }
7474 \f
7475 /* Leaf functions and non-leaf functions have different needs. */
7476
7477 static const int
7478 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7479
7480 static const int
7481 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7482
7483 static const int *const reg_alloc_orders[] = {
7484 reg_leaf_alloc_order,
7485 reg_nonleaf_alloc_order};
7486
7487 void
7488 order_regs_for_local_alloc (void)
7489 {
7490 static int last_order_nonleaf = 1;
7491
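/* Hard register 15 is %o7, which holds the return address written by
   "call"; it is ever-live exactly when the function is not a leaf.  */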
7492 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7493 {
7494 last_order_nonleaf = !last_order_nonleaf;
7495 memcpy ((char *) reg_alloc_order,
7496 (const char *) reg_alloc_orders[last_order_nonleaf],
7497 FIRST_PSEUDO_REGISTER * sizeof (int));
7498 }
7499 }
7500 \f
7501 /* Return 1 if REG and MEM are legitimate enough to allow the various
7502 mem<-->reg splits to be run. */
7503
7504 int
7505 sparc_splitdi_legitimate (rtx reg, rtx mem)
7506 {
7507 /* Punt if we are here by mistake. */
7508 gcc_assert (reload_completed);
7509
7510 /* We must have an offsettable memory reference. */
7511 if (! offsettable_memref_p (mem))
7512 return 0;
7513
7514 /* If we have legitimate args for ldd/std, we do not want
7515 the split to happen. */
7516 if ((REGNO (reg) % 2) == 0
7517 && mem_min_alignment (mem, 8))
7518 return 0;
7519
7520 /* Success. */
7521 return 1;
7522 }
7523
7524 /* Return 1 if x and y are some kind of REG and they refer to
7525 different hard registers. This test is guaranteed to be
7526 run after reload. */
7527
7528 int
7529 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7530 {
7531 if (GET_CODE (x) != REG)
7532 return 0;
7533 if (GET_CODE (y) != REG)
7534 return 0;
7535 if (REGNO (x) == REGNO (y))
7536 return 0;
7537 return 1;
7538 }
7539
7540 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7541 This makes them candidates for using ldd and std insns.
7542
7543 Note reg1 and reg2 *must* be hard registers. */
7544
7545 int
7546 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7547 {
7548 /* We might have been passed a SUBREG. */
7549 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7550 return 0;
7551
7552 if (REGNO (reg1) % 2 != 0)
7553 return 0;
7554
7555 /* Integer ldd is deprecated in SPARC V9.  */
7556 if (TARGET_V9 && REGNO (reg1) < 32)
7557 return 0;
7558
7559 return (REGNO (reg1) == REGNO (reg2) - 1);
7560 }
7561
7562 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7563 an ldd or std insn.
7564
7565 This can only happen when addr1 and addr2, the addresses in mem1
7566 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7567 addr1 must also be aligned on a 64-bit boundary.
7568
7569 Also, if dependent_reg_rtx is not null, it must not be used to
7570 compute the address for mem1, i.e. we cannot optimize a sequence
7571 like:
7572 ld [%o0], %o0
7573 ld [%o0 + 4], %o1
7574 to
7575 ldd [%o0], %o0
7576 nor:
7577 ld [%g3 + 4], %g3
7578 ld [%g3], %g2
7579 to
7580 ldd [%g3], %g2
7581
7582 But, note that the transformation from:
7583 ld [%g2 + 4], %g3
7584 ld [%g2], %g2
7585 to
7586 ldd [%g2], %g2
7587 is perfectly fine. Thus, the peephole2 patterns always pass us
7588 the destination register of the first load, never the second one.
7589
7590 For stores we don't have a similar problem, so dependent_reg_rtx is
7591 NULL_RTX. */
7592
7593 int
7594 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7595 {
7596 rtx addr1, addr2;
7597 unsigned int reg1;
7598 HOST_WIDE_INT offset1;
7599
7600 /* The mems cannot be volatile. */
7601 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7602 return 0;
7603
7604 /* MEM1 should be aligned on a 64-bit boundary. */
7605 if (MEM_ALIGN (mem1) < 64)
7606 return 0;
7607
7608 addr1 = XEXP (mem1, 0);
7609 addr2 = XEXP (mem2, 0);
7610
7611 /* Extract a register number and offset (if used) from the first addr. */
7612 if (GET_CODE (addr1) == PLUS)
7613 {
7614 /* If not a REG, return zero. */
7615 if (GET_CODE (XEXP (addr1, 0)) != REG)
7616 return 0;
7617 else
7618 {
7619 reg1 = REGNO (XEXP (addr1, 0));
7620 /* The offset must be constant! */
7621 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7622 return 0;
7623 offset1 = INTVAL (XEXP (addr1, 1));
7624 }
7625 }
7626 else if (GET_CODE (addr1) != REG)
7627 return 0;
7628 else
7629 {
7630 reg1 = REGNO (addr1);
7631 /* This was a simple (mem (reg)) expression. Offset is 0. */
7632 offset1 = 0;
7633 }
7634
7635 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7636 if (GET_CODE (addr2) != PLUS)
7637 return 0;
7638
7639 if (GET_CODE (XEXP (addr2, 0)) != REG
7640 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7641 return 0;
7642
7643 if (reg1 != REGNO (XEXP (addr2, 0)))
7644 return 0;
7645
7646 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7647 return 0;
7648
7649 /* The first offset must be evenly divisible by 8 to ensure the
7650 address is 64-bit aligned. */
7651 if (offset1 % 8 != 0)
7652 return 0;
7653
7654 /* The offset for the second addr must be 4 more than the first addr. */
7655 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7656 return 0;
7657
7658 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7659 instructions. */
7660 return 1;
7661 }
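
/* A worked example of the checks above (illustrative): mem1 = [%o0+8]
   and mem2 = [%o0+12] qualify, since both use base %o0, 8 % 8 == 0 and
   12 == 8 + 4.  The pair [%o0+4] / [%o0+8] is rejected because the
   first offset is not a multiple of 8, so the pair need not be 64-bit
   aligned.  */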
7662
7663 /* Return 1 if reg is a pseudo, or is the first register in
7664 a hard register pair. This makes it suitable for use in
7665 ldd and std insns. */
7666
7667 int
7668 register_ok_for_ldd (rtx reg)
7669 {
7670 /* We might have been passed a SUBREG. */
7671 if (!REG_P (reg))
7672 return 0;
7673
7674 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7675 return (REGNO (reg) % 2 == 0);
7676
7677 return 1;
7678 }
7679
7680 /* Return 1 if OP is a memory whose address is known to be
7681 aligned to an 8-byte boundary, or a pseudo during reload.
7682 This makes it suitable for use in ldd and std insns. */
7683
7684 int
7685 memory_ok_for_ldd (rtx op)
7686 {
7687 if (MEM_P (op))
7688 {
7689 /* In 64-bit mode, we assume that the address is word-aligned. */
7690 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7691 return 0;
7692
7693 if ((reload_in_progress || reload_completed)
7694 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7695 return 0;
7696 }
7697 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7698 {
7699 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7700 return 0;
7701 }
7702 else
7703 return 0;
7704
7705 return 1;
7706 }
7707 \f
7708 /* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. */
7709
7710 static bool
7711 sparc_print_operand_punct_valid_p (unsigned char code)
7712 {
7713 if (code == '#'
7714 || code == '*'
7715 || code == '('
7716 || code == ')'
7717 || code == '_'
7718 || code == '&')
7719 return true;
7720
7721 return false;
7722 }
7723
7724 /* Implement TARGET_PRINT_OPERAND.
7725 Print operand X (an rtx) in assembler syntax to file FILE.
7726 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7727 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7728
7729 static void
7730 sparc_print_operand (FILE *file, rtx x, int code)
7731 {
7732 switch (code)
7733 {
7734 case '#':
7735 /* Output an insn in a delay slot. */
7736 if (final_sequence)
7737 sparc_indent_opcode = 1;
7738 else
7739 fputs ("\n\t nop", file);
7740 return;
7741 case '*':
7742 /* Output an annul flag if there's nothing for the delay slot and we
7743 are optimizing. This is always used with '(' below.
7744 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7745 this is a dbx bug. So, we only do this when optimizing.
7746 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7747 Always emit a nop in case the next instruction is a branch. */
7748 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7749 fputs (",a", file);
7750 return;
7751 case '(':
7752 /* Output a 'nop' if there's nothing for the delay slot and we are
7753 not optimizing. This is always used with '*' above. */
7754 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7755 fputs ("\n\t nop", file);
7756 else if (final_sequence)
7757 sparc_indent_opcode = 1;
7758 return;
7759 case ')':
7760 /* Output the right displacement from the saved PC on function return.
7761 The caller may have placed an "unimp" insn immediately after the call
7762 so we have to account for it. This insn is used in the 32-bit ABI
7763 when calling a function that returns a non-zero-sized structure. The
7764 64-bit ABI doesn't have it. Be careful to have this test be the same
7765 as that for the call. The exception is when sparc_std_struct_return
7766 is enabled, the psABI is followed exactly and the adjustment is made
7767 by the code in sparc_struct_value_rtx. The call emitted is the same
7768 when sparc_std_struct_return is enabled. */
7769 if (!TARGET_ARCH64
7770 && cfun->returns_struct
7771 && !sparc_std_struct_return
7772 && DECL_SIZE (DECL_RESULT (current_function_decl))
7773 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7774 == INTEGER_CST
7775 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7776 fputs ("12", file);
7777 else
7778 fputc ('8', file);
7779 return;
7780 case '_':
7781 /* Output the Embedded Medium/Anywhere code model base register. */
7782 fputs (EMBMEDANY_BASE_REG, file);
7783 return;
7784 case '&':
7785 /* Print some local dynamic TLS name. */
7786 assemble_name (file, get_some_local_dynamic_name ());
7787 return;
7788
7789 case 'Y':
7790 /* Adjust the operand to take into account a RESTORE operation. */
7791 if (GET_CODE (x) == CONST_INT)
7792 break;
7793 else if (GET_CODE (x) != REG)
7794 output_operand_lossage ("invalid %%Y operand");
7795 else if (REGNO (x) < 8)
7796 fputs (reg_names[REGNO (x)], file);
7797 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7798 fputs (reg_names[REGNO (x)-16], file);
7799 else
7800 output_operand_lossage ("invalid %%Y operand");
7801 return;
7802 case 'L':
7803 /* Print out the low order register name of a register pair. */
7804 if (WORDS_BIG_ENDIAN)
7805 fputs (reg_names[REGNO (x)+1], file);
7806 else
7807 fputs (reg_names[REGNO (x)], file);
7808 return;
7809 case 'H':
7810 /* Print out the high order register name of a register pair. */
7811 if (WORDS_BIG_ENDIAN)
7812 fputs (reg_names[REGNO (x)], file);
7813 else
7814 fputs (reg_names[REGNO (x)+1], file);
7815 return;
7816 case 'R':
7817 /* Print out the second register name of a register pair or quad.
7818 I.e., R (%o0) => %o1. */
7819 fputs (reg_names[REGNO (x)+1], file);
7820 return;
7821 case 'S':
7822 /* Print out the third register name of a register quad.
7823 I.e., S (%o0) => %o2. */
7824 fputs (reg_names[REGNO (x)+2], file);
7825 return;
7826 case 'T':
7827 /* Print out the fourth register name of a register quad.
7828 I.e., T (%o0) => %o3. */
7829 fputs (reg_names[REGNO (x)+3], file);
7830 return;
7831 case 'x':
7832 /* Print a condition code register. */
7833 if (REGNO (x) == SPARC_ICC_REG)
7834 {
7835 /* We don't handle CC[X]_NOOVmode because they're not supposed
7836 to occur here. */
7837 if (GET_MODE (x) == CCmode)
7838 fputs ("%icc", file);
7839 else if (GET_MODE (x) == CCXmode)
7840 fputs ("%xcc", file);
7841 else
7842 gcc_unreachable ();
7843 }
7844 else
7845 /* %fccN register */
7846 fputs (reg_names[REGNO (x)], file);
7847 return;
7848 case 'm':
7849 /* Print the operand's address only. */
7850 output_address (XEXP (x, 0));
7851 return;
7852 case 'r':
7853 /* In this case we need a register. Use %g0 if the
7854 operand is const0_rtx. */
7855 if (x == const0_rtx
7856 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7857 {
7858 fputs ("%g0", file);
7859 return;
7860 }
7861 else
7862 break;
7863
7864 case 'A':
7865 switch (GET_CODE (x))
7866 {
7867 case IOR: fputs ("or", file); break;
7868 case AND: fputs ("and", file); break;
7869 case XOR: fputs ("xor", file); break;
7870 default: output_operand_lossage ("invalid %%A operand");
7871 }
7872 return;
7873
7874 case 'B':
7875 switch (GET_CODE (x))
7876 {
7877 case IOR: fputs ("orn", file); break;
7878 case AND: fputs ("andn", file); break;
7879 case XOR: fputs ("xnor", file); break;
7880 default: output_operand_lossage ("invalid %%B operand");
7881 }
7882 return;
7883
7884 /* These are used by the conditional move instructions. */
7885 case 'c':
7886 case 'C':
7887 {
7888 enum rtx_code rc = GET_CODE (x);
7889
7890 if (code == 'c')
7891 {
7892 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7893 if (mode == CCFPmode || mode == CCFPEmode)
7894 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7895 else
7896 rc = reverse_condition (GET_CODE (x));
7897 }
7898 switch (rc)
7899 {
7900 case NE: fputs ("ne", file); break;
7901 case EQ: fputs ("e", file); break;
7902 case GE: fputs ("ge", file); break;
7903 case GT: fputs ("g", file); break;
7904 case LE: fputs ("le", file); break;
7905 case LT: fputs ("l", file); break;
7906 case GEU: fputs ("geu", file); break;
7907 case GTU: fputs ("gu", file); break;
7908 case LEU: fputs ("leu", file); break;
7909 case LTU: fputs ("lu", file); break;
7910 case LTGT: fputs ("lg", file); break;
7911 case UNORDERED: fputs ("u", file); break;
7912 case ORDERED: fputs ("o", file); break;
7913 case UNLT: fputs ("ul", file); break;
7914 case UNLE: fputs ("ule", file); break;
7915 case UNGT: fputs ("ug", file); break;
7916 case UNGE: fputs ("uge", file); break;
7917 case UNEQ: fputs ("ue", file); break;
7918 default: output_operand_lossage (code == 'c'
7919 ? "invalid %%c operand"
7920 : "invalid %%C operand");
7921 }
7922 return;
7923 }
7924
7925 /* These are used by the movr instruction pattern. */
7926 case 'd':
7927 case 'D':
7928 {
7929 enum rtx_code rc = (code == 'd'
7930 ? reverse_condition (GET_CODE (x))
7931 : GET_CODE (x));
7932 switch (rc)
7933 {
7934 case NE: fputs ("ne", file); break;
7935 case EQ: fputs ("e", file); break;
7936 case GE: fputs ("gez", file); break;
7937 case LT: fputs ("lz", file); break;
7938 case LE: fputs ("lez", file); break;
7939 case GT: fputs ("gz", file); break;
7940 default: output_operand_lossage (code == 'd'
7941 ? "invalid %%d operand"
7942 : "invalid %%D operand");
7943 }
7944 return;
7945 }
7946
7947 case 'b':
7948 {
7949 /* Print a sign-extended character. */
7950 int i = trunc_int_for_mode (INTVAL (x), QImode);
7951 fprintf (file, "%d", i);
7952 return;
7953 }
7954
7955 case 'f':
7956 /* Operand must be a MEM; write its address. */
7957 if (GET_CODE (x) != MEM)
7958 output_operand_lossage ("invalid %%f operand");
7959 output_address (XEXP (x, 0));
7960 return;
7961
7962 case 's':
7963 {
7964 /* Print a sign-extended 32-bit value. */
7965 HOST_WIDE_INT i;
7966 if (GET_CODE (x) == CONST_INT)
7967 i = INTVAL (x);
7968 else if (GET_CODE (x) == CONST_DOUBLE)
7969 i = CONST_DOUBLE_LOW (x);
7970 else
7971 {
7972 output_operand_lossage ("invalid %%s operand");
7973 return;
7974 }
7975 i = trunc_int_for_mode (i, SImode);
7976 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7977 return;
7978 }
7979
7980 case 0:
7981 /* Do nothing special. */
7982 break;
7983
7984 default:
7985 /* Undocumented flag. */
7986 output_operand_lossage ("invalid operand output code");
7987 }
7988
7989 if (GET_CODE (x) == REG)
7990 fputs (reg_names[REGNO (x)], file);
7991 else if (GET_CODE (x) == MEM)
7992 {
7993 fputc ('[', file);
7994 /* Poor Sun assembler doesn't understand absolute addressing. */
7995 if (CONSTANT_P (XEXP (x, 0)))
7996 fputs ("%g0+", file);
7997 output_address (XEXP (x, 0));
7998 fputc (']', file);
7999 }
8000 else if (GET_CODE (x) == HIGH)
8001 {
8002 fputs ("%hi(", file);
8003 output_addr_const (file, XEXP (x, 0));
8004 fputc (')', file);
8005 }
8006 else if (GET_CODE (x) == LO_SUM)
8007 {
8008 sparc_print_operand (file, XEXP (x, 0), 0);
8009 if (TARGET_CM_MEDMID)
8010 fputs ("+%l44(", file);
8011 else
8012 fputs ("+%lo(", file);
8013 output_addr_const (file, XEXP (x, 1));
8014 fputc (')', file);
8015 }
8016 else if (GET_CODE (x) == CONST_DOUBLE
8017 && (GET_MODE (x) == VOIDmode
8018 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
8019 {
8020 if (CONST_DOUBLE_HIGH (x) == 0)
8021 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
8022 else if (CONST_DOUBLE_HIGH (x) == -1
8023 && CONST_DOUBLE_LOW (x) < 0)
8024 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
8025 else
8026 output_operand_lossage ("long long constant not a valid immediate operand");
8027 }
8028 else if (GET_CODE (x) == CONST_DOUBLE)
8029 output_operand_lossage ("floating point constant not a valid immediate operand");
8030 else { output_addr_const (file, x); }
8031 }
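
/* As a usage sketch, a (hypothetical) insn template such as
   "mov\t%L1, %L0\n\tmov\t%H1, %H0" goes through the 'L' and 'H' cases
   above to print the low and high halves of each register pair.  */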
8032
8033 /* Implement TARGET_PRINT_OPERAND_ADDRESS. */
8034
8035 static void
8036 sparc_print_operand_address (FILE *file, rtx x)
8037 {
8038 register rtx base, index = 0;
8039 int offset = 0;
8040 register rtx addr = x;
8041
8042 if (REG_P (addr))
8043 fputs (reg_names[REGNO (addr)], file);
8044 else if (GET_CODE (addr) == PLUS)
8045 {
8046 if (CONST_INT_P (XEXP (addr, 0)))
8047 offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
8048 else if (CONST_INT_P (XEXP (addr, 1)))
8049 offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
8050 else
8051 base = XEXP (addr, 0), index = XEXP (addr, 1);
8052 if (GET_CODE (base) == LO_SUM)
8053 {
8054 gcc_assert (USE_AS_OFFSETABLE_LO10
8055 && TARGET_ARCH64
8056 && ! TARGET_CM_MEDMID);
8057 output_operand (XEXP (base, 0), 0);
8058 fputs ("+%lo(", file);
8059 output_address (XEXP (base, 1));
8060 fprintf (file, ")+%d", offset);
8061 }
8062 else
8063 {
8064 fputs (reg_names[REGNO (base)], file);
8065 if (index == 0)
8066 fprintf (file, "%+d", offset);
8067 else if (REG_P (index))
8068 fprintf (file, "+%s", reg_names[REGNO (index)]);
8069 else if (GET_CODE (index) == SYMBOL_REF
8070 || GET_CODE (index) == LABEL_REF
8071 || GET_CODE (index) == CONST)
8072 fputc ('+', file), output_addr_const (file, index);
8073 else gcc_unreachable ();
8074 }
8075 }
8076 else if (GET_CODE (addr) == MINUS
8077 && GET_CODE (XEXP (addr, 1)) == LABEL_REF)
8078 {
8079 output_addr_const (file, XEXP (addr, 0));
8080 fputs ("-(", file);
8081 output_addr_const (file, XEXP (addr, 1));
8082 fputs ("-.)", file);
8083 }
8084 else if (GET_CODE (addr) == LO_SUM)
8085 {
8086 output_operand (XEXP (addr, 0), 0);
8087 if (TARGET_CM_MEDMID)
8088 fputs ("+%l44(", file);
8089 else
8090 fputs ("+%lo(", file);
8091 output_address (XEXP (addr, 1));
8092 fputc (')', file);
8093 }
8094 else if (flag_pic
8095 && GET_CODE (addr) == CONST
8096 && GET_CODE (XEXP (addr, 0)) == MINUS
8097 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST
8098 && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS
8099 && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx)
8100 {
8101 addr = XEXP (addr, 0);
8102 output_addr_const (file, XEXP (addr, 0));
8103 /* Group the args of the second CONST in parentheses. */
8104 fputs ("-(", file);
8105 /* Skip past the second CONST--it does nothing for us. */
8106 output_addr_const (file, XEXP (XEXP (addr, 1), 0));
8107 /* Close the parenthesis. */
8108 fputc (')', file);
8109 }
8110 else
8111 {
8112 output_addr_const (file, addr);
8113 }
8114 }
8115 \f
8116 /* Target hook for assembling integer objects. The sparc version has
8117 special handling for aligned DI-mode objects. */
8118
8119 static bool
8120 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
8121 {
8122 /* ??? We only output .xword's for symbols and only then in environments
8123 where the assembler can handle them. */
8124 if (aligned_p && size == 8
8125 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
8126 {
8127 if (TARGET_V9)
8128 {
8129 assemble_integer_with_op ("\t.xword\t", x);
8130 return true;
8131 }
8132 else
8133 {
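/* Without .xword support, emit the value as two 32-bit halves;
   SPARC is big-endian, so the zero high word comes first.  */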
8134 assemble_aligned_integer (4, const0_rtx);
8135 assemble_aligned_integer (4, x);
8136 return true;
8137 }
8138 }
8139 return default_assemble_integer (x, size, aligned_p);
8140 }
8141 \f
8142 /* Return the value of a code used in the .proc pseudo-op that says
8143 what kind of result this function returns. For non-C types, we pick
8144 the closest C type. */
8145
8146 #ifndef SHORT_TYPE_SIZE
8147 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
8148 #endif
8149
8150 #ifndef INT_TYPE_SIZE
8151 #define INT_TYPE_SIZE BITS_PER_WORD
8152 #endif
8153
8154 #ifndef LONG_TYPE_SIZE
8155 #define LONG_TYPE_SIZE BITS_PER_WORD
8156 #endif
8157
8158 #ifndef LONG_LONG_TYPE_SIZE
8159 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
8160 #endif
8161
8162 #ifndef FLOAT_TYPE_SIZE
8163 #define FLOAT_TYPE_SIZE BITS_PER_WORD
8164 #endif
8165
8166 #ifndef DOUBLE_TYPE_SIZE
8167 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8168 #endif
8169
8170 #ifndef LONG_DOUBLE_TYPE_SIZE
8171 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
8172 #endif
8173
8174 unsigned long
8175 sparc_type_code (register tree type)
8176 {
8177 register unsigned long qualifiers = 0;
8178 register unsigned shift;
8179
8180 /* Only the first 30 bits of the qualifier are valid. We must refrain from
8181 setting more, since some assemblers will give an error for this. Also,
8182 we must be careful to avoid shifts of 32 bits or more to avoid getting
8183 unpredictable results. */
8184
8185 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
8186 {
8187 switch (TREE_CODE (type))
8188 {
8189 case ERROR_MARK:
8190 return qualifiers;
8191
8192 case ARRAY_TYPE:
8193 qualifiers |= (3 << shift);
8194 break;
8195
8196 case FUNCTION_TYPE:
8197 case METHOD_TYPE:
8198 qualifiers |= (2 << shift);
8199 break;
8200
8201 case POINTER_TYPE:
8202 case REFERENCE_TYPE:
8203 case OFFSET_TYPE:
8204 qualifiers |= (1 << shift);
8205 break;
8206
8207 case RECORD_TYPE:
8208 return (qualifiers | 8);
8209
8210 case UNION_TYPE:
8211 case QUAL_UNION_TYPE:
8212 return (qualifiers | 9);
8213
8214 case ENUMERAL_TYPE:
8215 return (qualifiers | 10);
8216
8217 case VOID_TYPE:
8218 return (qualifiers | 16);
8219
8220 case INTEGER_TYPE:
8221 /* If this is a range type, consider it to be the underlying
8222 type. */
8223 if (TREE_TYPE (type) != 0)
8224 break;
8225
8226 /* Carefully distinguish all the standard types of C,
8227 without messing up if the language is not C. We do this by
8228 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
8229 look at both the names and the above fields, but that's redundant.
8230 Any type whose size is between two C types will be considered
8231 to be the wider of the two types. Also, we do not have a
8232 special code to use for "long long", so anything wider than
8233 long is treated the same. Note that we can't distinguish
8234 between "int" and "long" in this code if they are the same
8235 size, but that's fine, since neither can the assembler. */
8236
8237 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
8238 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
8239
8240 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
8241 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
8242
8243 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
8244 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
8245
8246 else
8247 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
8248
8249 case REAL_TYPE:
8250 /* If this is a range type, consider it to be the underlying
8251 type. */
8252 if (TREE_TYPE (type) != 0)
8253 break;
8254
8255 /* Carefully distinguish all the standard types of C,
8256 without messing up if the language is not C. */
8257
8258 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
8259 return (qualifiers | 6);
8260
8261 else
8262 return (qualifiers | 7);
8263
8264 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
8265 /* ??? We need to distinguish between double and float complex types,
8266 but I don't know how yet because I can't reach this code from
8267 existing front-ends. */
8268 return (qualifiers | 7); /* Who knows? */
8269
8270 case VECTOR_TYPE:
8271 case BOOLEAN_TYPE: /* Boolean truth value type. */
8272 case LANG_TYPE:
8273 case NULLPTR_TYPE:
8274 return qualifiers;
8275
8276 default:
8277 gcc_unreachable (); /* Not a type! */
8278 }
8279 }
8280
8281 return qualifiers;
8282 }
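
/* A worked example (illustrative): for the C type "unsigned int **",
   the two POINTER_TYPE levels contribute 1 << 6 and 1 << 8, and the
   unsigned int leaf yields code 14, so the returned value is
   14 | 0x40 | 0x100 == 0x14e.  */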
8283 \f
8284 /* Nested function support. */
8285
8286 /* Emit RTL insns to initialize the variable parts of a trampoline.
8287 FNADDR is an RTX for the address of the function's pure code.
8288 CXT is an RTX for the static chain value for the function.
8289
8290 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
8291 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
8292 (to store insns). This is a bit excessive. Perhaps a different
8293 mechanism would be better here.
8294
8295 Emit enough FLUSH insns to synchronize the data and instruction caches. */
8296
8297 static void
8298 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8299 {
8300 /* SPARC 32-bit trampoline:
8301
8302 sethi %hi(fn), %g1
8303 sethi %hi(static), %g2
8304 jmp %g1+%lo(fn)
8305 or %g2, %lo(static), %g2
8306
8307 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
8308 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
8309 */
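
/* A sketch of how the words below are assembled: the word at offset 0
   is 0x03000000 | ((unsigned) fn >> 10), i.e. "sethi %hi(fn), %g1",
   and the word at offset 8 is 0x81c06000 | (fn & 0x3ff), i.e.
   "jmp %g1+%lo(fn)".  The static-chain words at offsets 4 and 12 are
   built the same way from 0x05000000 and 0x8410a000.  */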
8310
8311 emit_move_insn
8312 (adjust_address (m_tramp, SImode, 0),
8313 expand_binop (SImode, ior_optab,
8314 expand_shift (RSHIFT_EXPR, SImode, fnaddr, 10, 0, 1),
8315 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
8316 NULL_RTX, 1, OPTAB_DIRECT));
8317
8318 emit_move_insn
8319 (adjust_address (m_tramp, SImode, 4),
8320 expand_binop (SImode, ior_optab,
8321 expand_shift (RSHIFT_EXPR, SImode, cxt, 10, 0, 1),
8322 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
8323 NULL_RTX, 1, OPTAB_DIRECT));
8324
8325 emit_move_insn
8326 (adjust_address (m_tramp, SImode, 8),
8327 expand_binop (SImode, ior_optab,
8328 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
8329 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
8330 NULL_RTX, 1, OPTAB_DIRECT));
8331
8332 emit_move_insn
8333 (adjust_address (m_tramp, SImode, 12),
8334 expand_binop (SImode, ior_optab,
8335 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
8336 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
8337 NULL_RTX, 1, OPTAB_DIRECT));
8338
8339 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
8340 aligned on a 16 byte boundary so one flush clears it all. */
8341 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
8342 if (sparc_cpu != PROCESSOR_ULTRASPARC
8343 && sparc_cpu != PROCESSOR_ULTRASPARC3
8344 && sparc_cpu != PROCESSOR_NIAGARA
8345 && sparc_cpu != PROCESSOR_NIAGARA2)
8346 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
8347
8348 /* Call __enable_execute_stack after writing onto the stack to make sure
8349 the stack address is accessible. */
8350 #ifdef HAVE_ENABLE_EXECUTE_STACK
8351 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8352 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8353 #endif
8354
8355 }
8356
8357 /* The 64-bit version is simpler because it makes more sense to load the
8358 values as "immediate" data out of the trampoline. It's also easier since
8359 we can read the PC without clobbering a register. */
8360
8361 static void
8362 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
8363 {
8364 /* SPARC 64-bit trampoline:
8365
8366 rd %pc, %g1
8367 ldx [%g1+24], %g5
8368 jmp %g5
8369 ldx [%g1+16], %g5
8370 +16 bytes data
8371 */
8372
8373 emit_move_insn (adjust_address (m_tramp, SImode, 0),
8374 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
8375 emit_move_insn (adjust_address (m_tramp, SImode, 4),
8376 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
8377 emit_move_insn (adjust_address (m_tramp, SImode, 8),
8378 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
8379 emit_move_insn (adjust_address (m_tramp, SImode, 12),
8380 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
8381 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
8382 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
8383 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
8384
8385 if (sparc_cpu != PROCESSOR_ULTRASPARC
8386 && sparc_cpu != PROCESSOR_ULTRASPARC3
8387 && sparc_cpu != PROCESSOR_NIAGARA
8388 && sparc_cpu != PROCESSOR_NIAGARA2)
8389 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
8390
8391 /* Call __enable_execute_stack after writing onto the stack to make sure
8392 the stack address is accessible. */
8393 #ifdef HAVE_ENABLE_EXECUTE_STACK
8394 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
8395 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
8396 #endif
8397 }
8398
8399 /* Worker for TARGET_TRAMPOLINE_INIT. */
8400
8401 static void
8402 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
8403 {
8404 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
8405 cxt = force_reg (Pmode, cxt);
8406 if (TARGET_ARCH64)
8407 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
8408 else
8409 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
8410 }
8411 \f
8412 /* Adjust the cost of a scheduling dependency.  Return the new cost of
8413 the dependency LINK of INSN on DEP_INSN.  COST is the current cost. */
8414
8415 static int
8416 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8417 {
8418 enum attr_type insn_type;
8419
8420 if (! recog_memoized (insn))
8421 return 0;
8422
8423 insn_type = get_attr_type (insn);
8424
8425 if (REG_NOTE_KIND (link) == 0)
8426 {
8427 /* Data dependency; DEP_INSN writes a register that INSN reads some
8428 cycles later. */
8429
8430 /* If a load, then the dependence must be on the memory address;
8431 add an extra "cycle".  Note that the cost could be two cycles
8432 if the reg was written late in an instruction group; we cannot tell
8433 here. */
8434 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
8435 return cost + 3;
8436
8437 /* Get the delay only if the address of the store is the dependence. */
8438 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
8439 {
8440 rtx pat = PATTERN (insn);
8441 rtx dep_pat = PATTERN (dep_insn);
8442
8443 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8444 return cost; /* This should not happen! */
8445
8446 /* The dependency between the two instructions was on the data that
8447 is being stored. Assume that this implies that the address of the
8448 store is not dependent. */
8449 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8450 return cost;
8451
8452 return cost + 3; /* An approximation. */
8453 }
8454
8455 /* A shift instruction cannot receive its data from an instruction
8456 in the same cycle; add a one cycle penalty. */
8457 if (insn_type == TYPE_SHIFT)
8458 return cost + 3; /* Split before cascade into shift. */
8459 }
8460 else
8461 {
8462 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
8463 INSN writes some cycles later. */
8464
8465 /* These are only significant for the fpu unit; writing a fp reg before
8466 the fpu has finished with it stalls the processor. */
8467
8468 /* Reusing an integer register causes no problems. */
8469 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8470 return 0;
8471 }
8472
8473 return cost;
8474 }
8475
8476 static int
8477 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8478 {
8479 enum attr_type insn_type, dep_type;
8480 rtx pat = PATTERN (insn);
8481 rtx dep_pat = PATTERN (dep_insn);
8482
8483 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8484 return cost;
8485
8486 insn_type = get_attr_type (insn);
8487 dep_type = get_attr_type (dep_insn);
8488
8489 switch (REG_NOTE_KIND (link))
8490 {
8491 case 0:
8492 /* Data dependency; DEP_INSN writes a register that INSN reads some
8493 cycles later. */
8494
8495 switch (insn_type)
8496 {
8497 case TYPE_STORE:
8498 case TYPE_FPSTORE:
8499 /* Get the delay iff the address of the store is the dependence. */
8500 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8501 return cost;
8502
8503 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8504 return cost;
8505 return cost + 3;
8506
8507 case TYPE_LOAD:
8508 case TYPE_SLOAD:
8509 case TYPE_FPLOAD:
8510 /* If a load, then the dependence must be on the memory address.  If
8511 the addresses are not equal, then it might be a false dependency. */
8512 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8513 {
8514 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8515 || GET_CODE (SET_DEST (dep_pat)) != MEM
8516 || GET_CODE (SET_SRC (pat)) != MEM
8517 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8518 XEXP (SET_SRC (pat), 0)))
8519 return cost + 2;
8520
8521 return cost + 8;
8522 }
8523 break;
8524
8525 case TYPE_BRANCH:
8526 /* Compare to branch latency is 0. There is no benefit from
8527 separating compare and branch. */
8528 if (dep_type == TYPE_COMPARE)
8529 return 0;
8530 /* Floating point compare to branch latency is less than
8531 compare to conditional move. */
8532 if (dep_type == TYPE_FPCMP)
8533 return cost - 1;
8534 break;
8535 default:
8536 break;
8537 }
8538 break;
8539
8540 case REG_DEP_ANTI:
8541 /* Anti-dependencies only penalize the fpu unit. */
8542 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8543 return 0;
8544 break;
8545
8546 default:
8547 break;
8548 }
8549
8550 return cost;
8551 }
8552
8553 static int
8554 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8555 {
8556 switch (sparc_cpu)
8557 {
8558 case PROCESSOR_SUPERSPARC:
8559 cost = supersparc_adjust_cost (insn, link, dep, cost);
8560 break;
8561 case PROCESSOR_HYPERSPARC:
8562 case PROCESSOR_SPARCLITE86X:
8563 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8564 break;
8565 default:
8566 break;
8567 }
8568 return cost;
8569 }
8570
8571 static void
8572 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8573 int sched_verbose ATTRIBUTE_UNUSED,
8574 int max_ready ATTRIBUTE_UNUSED)
8575 {}
8576
8577 static int
8578 sparc_use_sched_lookahead (void)
8579 {
8580 if (sparc_cpu == PROCESSOR_NIAGARA
8581 || sparc_cpu == PROCESSOR_NIAGARA2)
8582 return 0;
8583 if (sparc_cpu == PROCESSOR_ULTRASPARC
8584 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8585 return 4;
8586 if ((1 << sparc_cpu) &
8587 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8588 (1 << PROCESSOR_SPARCLITE86X)))
8589 return 3;
8590 return 0;
8591 }
8592
8593 static int
8594 sparc_issue_rate (void)
8595 {
8596 switch (sparc_cpu)
8597 {
8598 case PROCESSOR_NIAGARA:
8599 case PROCESSOR_NIAGARA2:
8600 default:
8601 return 1;
8602 case PROCESSOR_V9:
8603 /* Assume V9 processors are capable of at least dual-issue. */
8604 return 2;
8605 case PROCESSOR_SUPERSPARC:
8606 return 3;
8607 case PROCESSOR_HYPERSPARC:
8608 case PROCESSOR_SPARCLITE86X:
8609 return 2;
8610 case PROCESSOR_ULTRASPARC:
8611 case PROCESSOR_ULTRASPARC3:
8612 return 4;
8613 }
8614 }
8615
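/* Return 1 if the SET in INSN leaves the high 32 bits of its destination
   known to be zero, -1 if it leaves them a sign extension of the low 32
   bits, and 0 if they are unknown.  Used by sparc_check_64 below.  */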
8616 static int
8617 set_extends (rtx insn)
8618 {
8619 register rtx pat = PATTERN (insn);
8620
8621 switch (GET_CODE (SET_SRC (pat)))
8622 {
8623 /* Load and some shift instructions zero extend. */
8624 case MEM:
8625 case ZERO_EXTEND:
8626 /* sethi clears the high bits */
8627 case HIGH:
8628 /* LO_SUM is used with sethi. sethi cleared the high
8629 bits and the values used with lo_sum are positive */
8630 case LO_SUM:
8631 /* Store flag stores 0 or 1 */
8632 case LT: case LTU:
8633 case GT: case GTU:
8634 case LE: case LEU:
8635 case GE: case GEU:
8636 case EQ:
8637 case NE:
8638 return 1;
8639 case AND:
8640 {
8641 rtx op0 = XEXP (SET_SRC (pat), 0);
8642 rtx op1 = XEXP (SET_SRC (pat), 1);
8643 if (GET_CODE (op1) == CONST_INT)
8644 return INTVAL (op1) >= 0;
8645 if (GET_CODE (op0) != REG)
8646 return 0;
8647 if (sparc_check_64 (op0, insn) == 1)
8648 return 1;
8649 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8650 }
8651 case IOR:
8652 case XOR:
8653 {
8654 rtx op0 = XEXP (SET_SRC (pat), 0);
8655 rtx op1 = XEXP (SET_SRC (pat), 1);
8656 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8657 return 0;
8658 if (GET_CODE (op1) == CONST_INT)
8659 return INTVAL (op1) >= 0;
8660 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8661 }
8662 case LSHIFTRT:
8663 return GET_MODE (SET_SRC (pat)) == SImode;
8664 /* Positive integers leave the high bits zero. */
8665 case CONST_DOUBLE:
8666 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8667 case CONST_INT:
8668 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8669 case ASHIFTRT:
8670 case SIGN_EXTEND:
8671 return - (GET_MODE (SET_SRC (pat)) == SImode);
8672 case REG:
8673 return sparc_check_64 (SET_SRC (pat), insn);
8674 default:
8675 return 0;
8676 }
8677 }
8678
8679 /* We _ought_ to have only one kind per function, but... */
8680 static GTY(()) rtx sparc_addr_diff_list;
8681 static GTY(()) rtx sparc_addr_list;
8682
8683 void
8684 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8685 {
8686 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8687 if (diff)
8688 sparc_addr_diff_list
8689 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8690 else
8691 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8692 }
8693
8694 static void
8695 sparc_output_addr_vec (rtx vec)
8696 {
8697 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8698 int idx, vlen = XVECLEN (body, 0);
8699
8700 #ifdef ASM_OUTPUT_ADDR_VEC_START
8701 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8702 #endif
8703
8704 #ifdef ASM_OUTPUT_CASE_LABEL
8705 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8706 NEXT_INSN (lab));
8707 #else
8708 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8709 #endif
8710
8711 for (idx = 0; idx < vlen; idx++)
8712 {
8713 ASM_OUTPUT_ADDR_VEC_ELT
8714 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8715 }
8716
8717 #ifdef ASM_OUTPUT_ADDR_VEC_END
8718 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8719 #endif
8720 }
8721
8722 static void
8723 sparc_output_addr_diff_vec (rtx vec)
8724 {
8725 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8726 rtx base = XEXP (XEXP (body, 0), 0);
8727 int idx, vlen = XVECLEN (body, 1);
8728
8729 #ifdef ASM_OUTPUT_ADDR_VEC_START
8730 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8731 #endif
8732
8733 #ifdef ASM_OUTPUT_CASE_LABEL
8734 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8735 NEXT_INSN (lab));
8736 #else
8737 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8738 #endif
8739
8740 for (idx = 0; idx < vlen; idx++)
8741 {
8742 ASM_OUTPUT_ADDR_DIFF_ELT
8743 (asm_out_file,
8744 body,
8745 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8746 CODE_LABEL_NUMBER (base));
8747 }
8748
8749 #ifdef ASM_OUTPUT_ADDR_VEC_END
8750 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8751 #endif
8752 }
8753
8754 static void
8755 sparc_output_deferred_case_vectors (void)
8756 {
8757 rtx t;
8758 int align;
8759
8760 if (sparc_addr_list == NULL_RTX
8761 && sparc_addr_diff_list == NULL_RTX)
8762 return;
8763
8764 /* Align to cache line in the function's code section. */
8765 switch_to_section (current_function_section ());
8766
8767 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8768 if (align > 0)
8769 ASM_OUTPUT_ALIGN (asm_out_file, align);
8770
8771 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8772 sparc_output_addr_vec (XEXP (t, 0));
8773 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8774 sparc_output_addr_diff_vec (XEXP (t, 0));
8775
8776 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8777 }
8778
8779 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8780 unknown. Return 1 if the high bits are zero, -1 if the register is
8781 sign extended. */
8782 int
8783 sparc_check_64 (rtx x, rtx insn)
8784 {
8785 /* If a register is set only once it is safe to ignore insns this
8786 code does not know how to handle. The loop will either recognize
8787 the single set and return the correct value or fail to recognize
8788 it and return 0. */
8789 int set_once = 0;
8790 rtx y = x;
8791
8792 gcc_assert (GET_CODE (x) == REG);
8793
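/* For a DImode register, examine its SImode low word; with
   WORDS_BIG_ENDIAN set that is the second register of the pair.  */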
8794 if (GET_MODE (x) == DImode)
8795 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8796
8797 if (flag_expensive_optimizations
8798 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8799 set_once = 1;
8800
8801 if (insn == 0)
8802 {
8803 if (set_once)
8804 insn = get_last_insn_anywhere ();
8805 else
8806 return 0;
8807 }
8808
8809 while ((insn = PREV_INSN (insn)))
8810 {
8811 switch (GET_CODE (insn))
8812 {
8813 case JUMP_INSN:
8814 case NOTE:
8815 break;
8816 case CODE_LABEL:
8817 case CALL_INSN:
8818 default:
8819 if (! set_once)
8820 return 0;
8821 break;
8822 case INSN:
8823 {
8824 rtx pat = PATTERN (insn);
8825 if (GET_CODE (pat) != SET)
8826 return 0;
8827 if (rtx_equal_p (x, SET_DEST (pat)))
8828 return set_extends (insn);
8829 if (y && rtx_equal_p (y, SET_DEST (pat)))
8830 return set_extends (insn);
8831 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8832 return 0;
8833 }
8834 }
8835 }
8836 return 0;
8837 }
8838
8839 /* Returns assembly code to perform a DImode shift using
8840 a 64-bit global or out register on SPARC-V8+. */
8841 const char *
8842 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8843 {
8844 static char asm_code[60];
8845
8846 /* The scratch register is only required when the destination
8847 register is not a 64-bit global or out register. */
8848 if (which_alternative != 2)
8849 operands[3] = operands[0];
8850
8851 /* We can only shift by constants <= 63. */
8852 if (GET_CODE (operands[2]) == CONST_INT)
8853 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8854
8855 if (GET_CODE (operands[1]) == CONST_INT)
8856 {
8857 output_asm_insn ("mov\t%1, %3", operands);
8858 }
8859 else
8860 {
8861 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8862 if (sparc_check_64 (operands[1], insn) <= 0)
8863 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8864 output_asm_insn ("or\t%L1, %3, %3", operands);
8865 }
8866
8867 strcpy (asm_code, opcode);
8868
8869 if (which_alternative != 2)
8870 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8871 else
8872 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8873 }
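
/* An illustrative expansion for "sllx" in the first alternative, where
   the destination pair is %o0/%o1 and is itself usable as the 64-bit
   scratch (operands[3] == operands[0]):

     sllx  %H1, 32, %o0     ! high word of the source into bits 63..32
     srl   %L1, 0, %L1      ! zero-extend the low word if need be
     or    %L1, %o0, %o0    ! whole 64-bit source value now in %o0
     sllx  %o0, %2, %o1     ! the shift itself, 64-bit result in %o1
     srlx  %o1, 32, %o0     ! high word of the result back into %o0  */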
8874 \f
8875 /* Output rtl to increment the profiler label LABELNO
8876 for profiling a function entry. */
8877
8878 void
8879 sparc_profile_hook (int labelno)
8880 {
8881 char buf[32];
8882 rtx lab, fun;
8883
8884 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8885 if (NO_PROFILE_COUNTERS)
8886 {
8887 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8888 }
8889 else
8890 {
8891 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8892 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8893 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8894 }
8895 }
8896 \f
8897 #ifdef TARGET_SOLARIS
8898 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8899
8900 static void
8901 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8902 tree decl ATTRIBUTE_UNUSED)
8903 {
8904 if (HAVE_COMDAT_GROUP && flags & SECTION_LINKONCE)
8905 {
8906 solaris_elf_asm_comdat_section (name, flags, decl);
8907 return;
8908 }
8909
8910 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8911
8912 if (!(flags & SECTION_DEBUG))
8913 fputs (",#alloc", asm_out_file);
8914 if (flags & SECTION_WRITE)
8915 fputs (",#write", asm_out_file);
8916 if (flags & SECTION_TLS)
8917 fputs (",#tls", asm_out_file);
8918 if (flags & SECTION_CODE)
8919 fputs (",#execinstr", asm_out_file);
8920
8921 /* ??? Handle SECTION_BSS. */
8922
8923 fputc ('\n', asm_out_file);
8924 }
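
/* For example (illustrative), a writable TLS section named ".tdata"
   comes out as:

     .section ".tdata",#alloc,#write,#tls  */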
8925 #endif /* TARGET_SOLARIS */
8926
8927 /* We do not allow indirect calls to be optimized into sibling calls.
8928
8929 We cannot use sibling calls when delayed branches are disabled
8930 because they will likely require the call delay slot to be filled.
8931
8932 Also, on SPARC 32-bit we cannot emit a sibling call when the
8933 current function returns a structure. This is because the "unimp
8934 after call" convention would cause the callee to return to the
8935 wrong place. The generic code already disallows cases where the
8936 function being called returns a structure.
8937
8938 It may seem strange how this last case could occur. Usually there
8939 is code after the call which jumps to epilogue code which dumps the
8940 return value into the struct return area. That ought to invalidate
8941 the sibling call right? Well, in the C++ case we can end up passing
8942 the pointer to the struct return area to a constructor (which returns
8943 void) and then nothing else happens. Such a sibling call would look
8944 valid without the added check here.
8945
8946 VxWorks PIC PLT entries require the global pointer to be initialized
8947 on entry. We therefore can't emit sibling calls to them. */
8948 static bool
8949 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8950 {
8951 return (decl
8952 && flag_delayed_branch
8953 && (TARGET_ARCH64 || ! cfun->returns_struct)
8954 && !(TARGET_VXWORKS_RTP
8955 && flag_pic
8956 && !targetm.binds_local_p (decl)));
8957 }
8958 \f
8959 /* libfunc renaming. */
8960
8961 static void
8962 sparc_init_libfuncs (void)
8963 {
8964 if (TARGET_ARCH32)
8965 {
8966 /* Use the subroutines that Sun's library provides for integer
8967 multiply and divide. The `*' prevents an underscore from
8968 being prepended by the compiler. .umul is a little faster
8969 than .mul. */
8970 set_optab_libfunc (smul_optab, SImode, "*.umul");
8971 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8972 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8973 set_optab_libfunc (smod_optab, SImode, "*.rem");
8974 set_optab_libfunc (umod_optab, SImode, "*.urem");
8975
8976 /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI. */
8977 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8978 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8979 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8980 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8981 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8982
8983 /* We can define the TFmode sqrt optab only if TARGET_FPU.  With
8984 soft-float, the SFmode and DFmode sqrt instructions are absent;
8985 the compiler notices this and tries to use the TFmode sqrt
8986 instruction for calls to the builtin function sqrt, which
8987 fails. */
8988 if (TARGET_FPU)
8989 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8990
8991 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8992 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8993 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8994 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8995 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8996 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8997
8998 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8999 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
9000 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
9001 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
9002
9003 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
9004 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
9005 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
9006 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
9007
9008 if (DITF_CONVERSION_LIBFUNCS)
9009 {
9010 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
9011 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
9012 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
9013 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
9014 }
9015
9016 if (SUN_CONVERSION_LIBFUNCS)
9017 {
9018 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
9019 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
9020 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
9021 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
9022 }
9023 }
9024 if (TARGET_ARCH64)
9025 {
9026 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
9027 do not exist in the library. Make sure the compiler does not
9028 emit calls to them by accident. (It should always use the
9029 hardware instructions.) */
9030 set_optab_libfunc (smul_optab, SImode, 0);
9031 set_optab_libfunc (sdiv_optab, SImode, 0);
9032 set_optab_libfunc (udiv_optab, SImode, 0);
9033 set_optab_libfunc (smod_optab, SImode, 0);
9034 set_optab_libfunc (umod_optab, SImode, 0);
9035
9036 if (SUN_INTEGER_MULTIPLY_64)
9037 {
9038 set_optab_libfunc (smul_optab, DImode, "__mul64");
9039 set_optab_libfunc (sdiv_optab, DImode, "__div64");
9040 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
9041 set_optab_libfunc (smod_optab, DImode, "__rem64");
9042 set_optab_libfunc (umod_optab, DImode, "__urem64");
9043 }
9044
9045 if (SUN_CONVERSION_LIBFUNCS)
9046 {
9047 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
9048 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
9049 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
9050 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
9051 }
9052 }
9053 }
9054 \f
9055 #define def_builtin(NAME, CODE, TYPE) \
9056 add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
9057 NULL_TREE)
9058
9059 /* Implement the TARGET_INIT_BUILTINS target hook.
9060 Create builtin functions for special SPARC instructions. */
9061
9062 static void
9063 sparc_init_builtins (void)
9064 {
9065 if (TARGET_VIS)
9066 sparc_vis_init_builtins ();
9067 }
9068
9069 /* Create builtin functions for VIS 1.0 instructions. */
9070
9071 static void
9072 sparc_vis_init_builtins (void)
9073 {
9074 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
9075 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
9076 tree v4hi = build_vector_type (intHI_type_node, 4);
9077 tree v2hi = build_vector_type (intHI_type_node, 2);
9078 tree v2si = build_vector_type (intSI_type_node, 2);
9079
9080 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
9081 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
9082 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
9083 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
9084 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
9085 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
9086 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
9087 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
9088 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
9089 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
9090 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
9091 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
9092 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
9093 v8qi, v8qi,
9094 intDI_type_node, 0);
9095 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
9096 intDI_type_node,
9097 intDI_type_node, 0);
9098 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
9099 ptr_type_node,
9100 intSI_type_node, 0);
9101 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
9102 ptr_type_node,
9103 intDI_type_node, 0);
9104
9105 /* Packing and expanding vectors. */
9106 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
9107 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
9108 v8qi_ftype_v2si_v8qi);
9109 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
9110 v2hi_ftype_v2si);
9111 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
9112 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
9113 v8qi_ftype_v4qi_v4qi);
9114
9115 /* Multiplications. */
9116 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
9117 v4hi_ftype_v4qi_v4hi);
9118 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
9119 v4hi_ftype_v4qi_v2hi);
9120 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
9121 v4hi_ftype_v4qi_v2hi);
9122 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
9123 v4hi_ftype_v8qi_v4hi);
9124 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
9125 v4hi_ftype_v8qi_v4hi);
9126 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
9127 v2si_ftype_v4qi_v2hi);
9128 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
9129 v2si_ftype_v4qi_v2hi);
9130
9131 /* Data aligning. */
9132 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
9133 v4hi_ftype_v4hi_v4hi);
9134 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
9135 v8qi_ftype_v8qi_v8qi);
9136 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
9137 v2si_ftype_v2si_v2si);
9138 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
9139 di_ftype_di_di);
9140 if (TARGET_ARCH64)
9141 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
9142 ptr_ftype_ptr_di);
9143 else
9144 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
9145 ptr_ftype_ptr_si);
9146
9147 /* Pixel distance. */
9148 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
9149 di_ftype_v8qi_v8qi_di);
9150 }
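
/* A usage sketch (hypothetical user code, assuming -mvis):

     typedef short vec16 __attribute__ ((vector_size (8)));
     typedef unsigned char vec8 __attribute__ ((vector_size (4)));

     static vec8 pack (vec16 x)
     {
       return __builtin_vis_fpack16 (x);
     }  */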

/* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode tmode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
  tree arg;
  call_expr_arg_iterator iter;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int icode = DECL_FUNCTION_CODE (fndecl);
  rtx pat, op[4];
  enum machine_mode mode[4];
  int arg_count = 0;

  mode[0] = insn_data[icode].operand[0].mode;
  if (!target
      || GET_MODE (target) != mode[0]
      || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
    op[0] = gen_reg_rtx (mode[0]);
  else
    op[0] = target;

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      arg_count++;
      mode[arg_count] = insn_data[icode].operand[arg_count].mode;
      op[arg_count] = expand_normal (arg);

      if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
                                                              mode[arg_count]))
        op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
    }

  switch (arg_count)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return op[0];
}

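/* Multiply E8, an 8-bit value, by E16, a 16-bit value, and scale the
   product down by 256 with rounding; this mirrors the arithmetic of
   the VIS fmul8x16 instructions folded below.  For example, E8 = 1
   and E16 = 200 gives (200 + 128) / 256 = 1.  */
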
static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}

/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1
   lists must be integer constants.  A tree list with the results of
   the multiplications is returned, and each element in the list is
   of INNER_TYPE.  */

static tree
sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
{
  tree n_elts = NULL_TREE;
  int scale;

  switch (fncode)
    {
    case CODE_FOR_fmul8x16_vis:
      for (; elts0 && elts1;
           elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16au_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16al_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    default:
      gcc_unreachable ();
    }

  return nreverse (n_elts);
}

/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
                    tree *args, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);

  if (ignore
      && icode != CODE_FOR_alignaddrsi_vis
      && icode != CODE_FOR_alignaddrdi_vis)
    return build_zero_cst (rtype);

  switch (icode)
    {
    case CODE_FOR_fexpand_vis:
      arg0 = args[0];
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts = TREE_VECTOR_CST_ELTS (arg0);
          tree n_elts = NULL_TREE;

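          /* fexpand widens each 8-bit element to a 16-bit fixed-point
             value by shifting it left four bits, hence the shift.  */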
          for (; elts; elts = TREE_CHAIN (elts))
            {
              unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
              n_elts = tree_cons (NULL_TREE,
                                  build_int_cst (inner_type, val),
                                  n_elts);
            }
          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
                                                  elts1);

          return build_vector (rtype, n_elts);
        }
      break;

    case CODE_FOR_fpmerge_vis:
      arg0 = args[0];
      arg1 = args[1];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = NULL_TREE;

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
            }

          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_pdist_vis:
      arg0 = args[0];
      arg1 = args[1];
      arg2 = args[2];
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

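      /* pdist sums the absolute differences of the eight byte elements
         and adds the result into the 64-bit accumulator operand.  */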
      if (TREE_CODE (arg0) == VECTOR_CST
          && TREE_CODE (arg1) == VECTOR_CST
          && TREE_CODE (arg2) == INTEGER_CST)
        {
          int overflow = 0;
          unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
          HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              unsigned HOST_WIDE_INT
                low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
              HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
              HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              overflow |= neg_double (low1, high1, &l, &h);
              overflow |= add_double (low0, high0, l, h, &l, &h);
              if (h < 0)
                overflow |= neg_double (l, h, &l, &h);

              overflow |= add_double (low, high, l, h, &low, &high);
            }

          gcc_assert (overflow == 0);

          return build_int_cst_wide (rtype, low, high);
        }

    default:
      break;
    }

  return NULL_TREE;
}

/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                 int *total, bool speed ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
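      /* Constants that fit in the signed 13-bit immediate field of
         most SPARC instructions are free.  */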
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
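      /* A VOIDmode CONST_DOUBLE holds an integer constant wider than
         HOST_WIDE_INT; it is likewise free when it fits in the signed
         13-bit range.  */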
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

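          /* When the multiplier is a constant, estimate extra latency
             from its population count, scaled by int_mul_bit_factor;
             this presumably models multipliers that can finish early
             for small operands.  */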
          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }

          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}

/* Return true if RCLASS is either GENERAL_REGS or I64_REGS.  */

static inline bool
general_or_i64_p (reg_class_t rclass)
{
  return (rclass == GENERAL_REGS || rclass == I64_REGS);
}

/* Implement TARGET_REGISTER_MOVE_COST.  */

static int
sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t from, reg_class_t to)
{
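  /* Moves between the integer and FP register files have no direct
     path on most SPARC implementations and typically go through
     memory, hence the high costs below.  */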
  if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
      || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
      || from == FPCC_REGS
      || to == FPCC_REGS)
    {
      if (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2)
        return 12;

      return 6;
    }

  return 2;
}

/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the GOT helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */

static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this_rtx, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (TARGET_FLAT)
    {
      sparc_leaf_function_p = 1;

      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }
  else if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS_RTX += DELTA.  */
      emit_insn (gen_add2_insn (this_rtx, delta_rtx));
    }

  /* Add the word at address (*THIS_RTX + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS_RTX.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             6 insns).  */
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }
          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
        }

      /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this_rtx, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          start_sequence ();
          load_got_register ();  /* clobbers %o7 */
          scratch = sparc_legitimize_pic_address (funexp, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg, 0);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}

/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

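/* Subroutine of get_some_local_dynamic_name, called via for_each_rtx.
   Record the name of the first local-dynamic TLS symbol found in *PX
   and stop the traversal.  */
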
static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we need to emit the special GOT helper function, do so now.  */
  if (got_helper_rtx)
    {
      const char *name = XSTR (got_helper_rtx, 0);
      const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
#ifdef DWARF2_UNWIND_INFO
      bool do_cfi;
#endif

      if (USE_HIDDEN_LINKONCE)
        {
          tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                                  get_identifier (name),
                                  build_function_type_list (void_type_node,
                                                            NULL_TREE));
          DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                           NULL_TREE, void_type_node);
          TREE_STATIC (decl) = 1;
          make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
          DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
          DECL_VISIBILITY_SPECIFIED (decl) = 1;
          resolve_unique_section (decl, 0, flag_function_sections);
          allocate_struct_function (decl, true);
          cfun->is_thunk = 1;
          current_function_decl = decl;
          init_varasm_status ();
          assemble_start_function (decl, name);
        }
      else
        {
          const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
          switch_to_section (text_section);
          if (align > 0)
            ASM_OUTPUT_ALIGN (asm_out_file, align);
          ASM_OUTPUT_LABEL (asm_out_file, name);
        }

#ifdef DWARF2_UNWIND_INFO
      do_cfi = dwarf2out_do_cfi_asm ();
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_startproc\n");
#endif
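      /* Emit the body of the helper: add the return address in %o7 to
         the GOT register and return, putting the add in the delay slot
         when delayed branches are enabled.  */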
      if (flag_delayed_branch)
        fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
                 reg_name, reg_name);
      else
        fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
                 reg_name, reg_name);
#ifdef DWARF2_UNWIND_INFO
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_endproc\n");
#endif
    }

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();

#ifdef TARGET_SOLARIS
  solaris_file_end ();
#endif
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

/* Expand code to perform an 8-bit or 16-bit compare and swap by doing
   a 32-bit compare and swap on the word containing the byte or
   half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

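  /* Compute ADDR, the address of the word containing the datum, and
     OFF, the byte offset of the datum within that word.  */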
  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

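  /* Convert the byte offset into a left-shift count in bits; the XOR
     above accounts for SPARC's big-endian byte numbering within the
     word.  */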
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

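  /* VAL now holds the background bits of the word, i.e. everything
     outside the datum.  Shift OLDVAL and NEWVAL into position and
     mask off the rest.  */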
  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

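  /* Merge the background bits with the old and new values and try a
     full-word compare-and-swap.  */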
  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

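  /* The compare-and-swap failed.  If only the datum part differed,
     the operation has genuinely failed; if the background bits have
     changed under us, refresh VAL and retry.  */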
  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

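  /* Extract the datum that was in memory and return it in RESULT's
     mode.  */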
  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
sparc_frame_pointer_required (void)
{
  /* If the stack pointer is dynamically modified in the function, it cannot
     serve as the frame pointer.  */
  if (cfun->calls_alloca)
    return true;

  /* If the function receives nonlocal gotos, it needs to save the frame
     pointer in the nonlocal_goto_save_area object.  */
  if (cfun->has_nonlocal_label)
    return true;

  /* In flat mode, that's it.  */
  if (TARGET_FLAT)
    return false;

  /* Otherwise, the frame pointer is required if the function isn't leaf.  */
  return !(current_function_is_leaf && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

static bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return to == HARD_FRAME_POINTER_REGNUM || !sparc_frame_pointer_required ();
}

/* Return the hard frame pointer directly to bypass the stack bias.  */

static rtx
sparc_builtin_setjmp_frame_value (void)
{
  return hard_frame_pointer_rtx;
}

/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  In particular, if
   !TARGET_FPU, make the FP registers and FP CC registers fixed so
   that they won't be allocated.  */

static void
sparc_conditional_register_usage (void)
{
  if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    {
      fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g5, honor it.  */
  if (TARGET_ARCH32 && fixed_regs[5])
    fixed_regs[5] = 1;
  else if (TARGET_ARCH64 && fixed_regs[5] == 2)
    fixed_regs[5] = 0;
  if (! TARGET_V9)
    {
      int regno;
      for (regno = SPARC_FIRST_V9_FP_REG;
           regno <= SPARC_LAST_V9_FP_REG;
           regno++)
        fixed_regs[regno] = 1;
      /* %fcc0 is used by v8 and v9.  */
      for (regno = SPARC_FIRST_V9_FCC_REG + 1;
           regno <= SPARC_LAST_V9_FCC_REG;
           regno++)
        fixed_regs[regno] = 1;
    }
  if (! TARGET_FPU)
    {
      int regno;
      for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
        fixed_regs[regno] = 1;
    }
  /* If the user has passed -f{fixed,call-{used,saved}}-g2, honor it.
     Likewise with g3 and g4.  */
  if (fixed_regs[2] == 2)
    fixed_regs[2] = ! TARGET_APP_REGS;
  if (fixed_regs[3] == 2)
    fixed_regs[3] = ! TARGET_APP_REGS;
  if (TARGET_ARCH32 && fixed_regs[4] == 2)
    fixed_regs[4] = ! TARGET_APP_REGS;
  else if (TARGET_CM_EMBMEDANY)
    fixed_regs[4] = 1;
  else if (fixed_regs[4] == 2)
    fixed_regs[4] = 0;
  if (TARGET_FLAT)
    {
      int regno;
      /* Disable leaf functions.  */
      memset (sparc_leaf_regs, 0, FIRST_PSEUDO_REGISTER);
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        leaf_reg_remap [regno] = regno;
    }
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS:

   - We can't load constants into FP registers.
   - We can't load FP constants into integer registers when soft-float,
     because there is no soft-float pattern with a r/F constraint.
   - We can't load FP constants into integer registers for TFmode unless
     it is 0.0L, because there is no movtf pattern with a r/F constraint.
   - Try and reload integer constants (symbolic or otherwise) back into
     registers directly, rather than having them dumped to memory.  */

static reg_class_t
sparc_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (CONSTANT_P (x))
    {
      if (FP_REG_CLASS_P (rclass)
          || rclass == GENERAL_OR_FP_REGS
          || rclass == GENERAL_OR_EXTRA_FP_REGS
          || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
          || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
        return NO_REGS;

      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
        return GENERAL_REGS;
    }

  return rclass;
}

#include "gt-sparc.h"