/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"
#include "opts.h"

/* Processor costs */
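/* Note: COSTS_N_INSNS (N) is the standard rtx-cost scale (rtl.h defines
   it as (N) * 4), so each entry below expresses a latency in units of a
   single fast instruction; e.g. cypress's COSTS_N_INSNS (63) for fsqrts
   models a 63-cycle square root.  */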
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs leon_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (15), /* fdivs */
  COSTS_N_INSNS (15), /* fdivd */
  COSTS_N_INSNS (23), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches to a point between the sethi and the jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
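
/* So, for example, in a function that gets the leaf treatment, incoming
   argument register %i0 (hard reg 24) is rewritten above as %o0 (hard
   reg 8) and %i7 (31) as %o7 (15), while the locals %l0-%l7 (16-23) and
   %fp (30) map to -1 and thus must not appear at all.  */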

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
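
/* In other words, a leaf candidate may touch the globals, %sp, the
   incoming window registers other than %fp (these get remapped onto %o
   registers by leaf_reg_remap above) and everything from hard reg 32 up
   (the FP and condition-code registers), but not the locals nor the %o
   registers themselves (other than %sp).  */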

struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static void sparc_option_override (void);
static void sparc_init_modes (void);
static void scan_record_type (const_tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                const_tree, bool, bool, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_emit_set_const32 (rtx, rtx);
static void sparc_emit_set_const64 (rtx, rtx);
static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_got_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
                                                 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, int, tree *, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static int sparc_register_move_cost (enum machine_mode,
                                     reg_class_t, reg_class_t);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static rtx sparc_function_value (const_tree, const_tree, bool);
static rtx sparc_libcall_value (enum machine_mode, const_rtx);
static bool sparc_function_value_regno_p (const unsigned int);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
                                                      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx sparc_legitimize_tls_address (rtx);
static rtx sparc_legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static rtx sparc_delegitimize_address (rtx);
static bool sparc_mode_dependent_address_p (const_rtx);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, const_tree, bool);
static void sparc_function_arg_advance (CUMULATIVE_ARGS *,
                                        enum machine_mode, const_tree, bool);
static rtx sparc_function_arg_1 (const CUMULATIVE_ARGS *,
                                 enum machine_mode, const_tree, bool, bool);
static rtx sparc_function_arg (CUMULATIVE_ARGS *,
                               enum machine_mode, const_tree, bool);
static rtx sparc_function_incoming_arg (CUMULATIVE_ARGS *,
                                        enum machine_mode, const_tree, bool);
static unsigned int sparc_function_arg_boundary (enum machine_mode,
                                                 const_tree);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
static void sparc_conditional_register_usage (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);
static enum machine_mode sparc_preferred_simd_mode (enum machine_mode);
static reg_class_t sparc_preferred_reload_class (rtx x, reg_class_t rclass);
\f
#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       do_diagnostic } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL, 0, 0, false, false, false, NULL, false }
};
#endif
\f
/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */
static const struct default_options sparc_option_optimization_table[] =
  {
    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
    { OPT_LEVELS_NONE, 0, NULL, 0 }
  };

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS sparc_delegitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P sparc_mode_dependent_address_p

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST sparc_register_move_cost

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE sparc_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE sparc_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P sparc_function_value_regno_p

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE sparc_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG sparc_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG sparc_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY sparc_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE sparc_preferred_simd_mode

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE sparc_option_override
#undef TARGET_OPTION_OPTIMIZATION_TABLE
#define TARGET_OPTION_OPTIMIZATION_TABLE sparc_option_optimization_table

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate
#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS sparc_preferred_reload_class

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE sparc_conditional_register_usage

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;

/* Validate and override various options, and do some machine dependent
   initialization.  */

static void
sparc_option_override (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const enum processor_type processor;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, PROCESSOR_CYPRESS },
    { TARGET_CPU_v8, PROCESSOR_V8 },
    { TARGET_CPU_supersparc, PROCESSOR_SUPERSPARC },
    { TARGET_CPU_hypersparc, PROCESSOR_HYPERSPARC },
    { TARGET_CPU_leon, PROCESSOR_LEON },
    { TARGET_CPU_sparclite, PROCESSOR_F930 },
    { TARGET_CPU_sparclite86x, PROCESSOR_SPARCLITE86X },
    { TARGET_CPU_sparclet, PROCESSOR_TSC701 },
    { TARGET_CPU_v9, PROCESSOR_V9 },
    { TARGET_CPU_ultrasparc, PROCESSOR_ULTRASPARC },
    { TARGET_CPU_ultrasparc3, PROCESSOR_ULTRASPARC3 },
    { TARGET_CPU_niagara, PROCESSOR_NIAGARA },
    { TARGET_CPU_niagara2, PROCESSOR_NIAGARA2 },
    { -1, PROCESSOR_V7 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  This must match the order of
     the PROCESSOR_* enumeration.  */
  static struct cpu_table {
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { MASK_ISA, 0 },
    { MASK_ISA, 0 },
    { MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { MASK_ISA, MASK_V8 },
    { MASK_ISA, MASK_V8|MASK_FPU },
    /* LEON */
    { MASK_ISA, MASK_V8|MASK_FPU },
    { MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no FPU.  */
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    /* The Fujitsu MB86934 is the recent sparclite chip, with an FPU.  */
    { MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { MASK_ISA, MASK_SPARCLET },
    { MASK_ISA, MASK_V9 },
    /* UltraSPARC I, II, IIi */
    { MASK_ISA,
      /* Although insns using %y are deprecated, it is a clear win.  */
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC III */
    /* ??? Check if %y issue still holds true.  */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { MASK_ISA,
      MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { MASK_ISA, MASK_V9},
  };
  const struct cpu_table *cpu;
  int fpu;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  if (!global_options_set.x_sparc_cpu_and_features)
    {
      for (def = &cpu_default[0]; def->cpu != -1; ++def)
        if (def->cpu == TARGET_CPU_DEFAULT)
          break;
      gcc_assert (def->cpu != -1);
      sparc_cpu_and_features = def->processor;
    }
  if (!global_options_set.x_sparc_cpu)
    sparc_cpu = sparc_cpu_and_features;

  cpu = &cpu_table[(int) sparc_cpu_and_features];
  target_flags &= ~cpu->disable;
  target_flags |= cpu->enable;

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (target_flags_explicit & MASK_FPU)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_LEON:
      sparc_costs = &leon_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    }

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2)
                          ? 2
                          : (sparc_cpu == PROCESSOR_ULTRASPARC3
                             ? 8 : 3)),
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         ((sparc_cpu == PROCESSOR_ULTRASPARC
                           || sparc_cpu == PROCESSOR_ULTRASPARC3
                           || sparc_cpu == PROCESSOR_NIAGARA
                           || sparc_cpu == PROCESSOR_NIAGARA2)
                          ? 64 : 32),
                         global_options.x_param_values,
                         global_options_set.x_param_values);
}
\f
/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
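
/* These are exactly the conditions that the V9 branch-on-register
   instructions (brz, brlez, brlz, brnz, brgz, brgez) and the
   corresponding movr variants can test, since they compare a single
   register against zero.  */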

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}
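
/* A worked example: the SFmode constant 1.5f has the bit image
   0x3fc00000.  That is far outside the signed 13-bit immediate range,
   but its low 10 bits are zero, so SPARC_SETHI_P accepts it and a
   single "sethi %hi(0x3fc00000), %reg" loads it.  */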

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}

/* Return true if the address of LABEL can be loaded by means of the
   mov{si,di}_pic_label_ref patterns in PIC mode.  */

static bool
can_use_mov_pic_label_ref (rtx label)
{
  /* VxWorks does not impose a fixed gap between segments; the run-time
     gap can be different from the object-file gap.  We therefore can't
     assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
     are absolutely sure that X is in the same segment as the GOT.
     Unfortunately, the flexibility of linker scripts means that we
     can't be sure of that in general, so assume that GOT-relative
     accesses are never valid on VxWorks.  */
  if (TARGET_VXWORKS_RTP)
    return false;

  /* Similarly, if the label is non-local, it might end up being placed
     in a different section than the current one; now mov_pic_label_ref
     requires the label and the code to be in the same section.  */
  if (LABEL_REF_NONLOCAL_P (label))
    return false;

  /* Finally, if we are reordering basic blocks and partition into hot
     and cold sections, this might happen for any label.  */
  if (flag_reorder_blocks_and_partition)
    return false;

  return true;
}

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = sparc_legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = sparc_legitimize_pic_address (operands[1], NULL_RTX);

      /* We cannot use the mov{si,di}_pic_label_ref patterns in all cases.  */
      if (GET_CODE (operands[1]) == LABEL_REF
          && can_use_mov_pic_label_ref (operands[1]))
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1]
            = sparc_legitimize_pic_address (operands[1],
                                            reload_in_progress
                                            ? operands[0] : NULL_RTX);
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here; the move expander guarantees this.  */

static void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                       & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
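
/* For instance, with op1 == 0x12345678 the CONST_INT arm above emits
   the equivalent of

     sethi %hi(0x12345678), %temp  ! temp = 0x12345400
     or    %temp, 0x278, %op0

   with both halves visible to CSE as ordinary moves.  */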

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY; we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi  %hi(symbol), %temp1
                        add    %temp1, EMBMEDANY_BASE_REG, %temp2
                        or     %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi  %uhi(symbol), %temp1
                        sethi  %hi(symbol), %temp2
                        or     %temp1, %ulo(symbol), %temp3
                        sllx   %temp3, 32, %temp4
                        or     %temp4, %temp2, %temp5
                        or     %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
static void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits, and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}
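
/* A worked example for the is_neg case: to build 0xffffffff12345678,
   low_bits is 0x12345678, so high_bits = ~low_bits = 0xedcba987 and
   the sequence is the equivalent of

     sethi %hi(0xedcba987), %temp  ! temp = 0x00000000edcba800
     xor   %temp, -392, %op0       ! -392 == -0x400 | 0x278

   whose xor flips the upper word to all-ones and drops the low ten
   bits into place.  */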

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}
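
/* E.g. high_bits == 0x9, low_immediate == 0x45 and shift_count == 32
   would produce the equivalent of

     mov  9, %temp
     sllx %temp, 32, %op0
     or   %op0, 0x45, %op0

   i.e. the 64-bit constant 0x0000000900000045.  */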

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12)) & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12)) & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
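
/* Note that in the reload branch above the three shifts always add up
   to 32 (12 + 12 + 8), moving the high word into position while low1
   (bits 31-20), low2 (bits 19-8) and low3 (bits 7-0) of low_bits are
   OR'd in along the way.  */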

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
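
/* For example, the constant 0x0000000ffffffff0 (high_bits 0xf,
   low_bits 0xfffffff0) yields lowest_bit_set == 4, highest_bit_set == 35
   and all_bits_between_are_set == 1.  */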
1700
1701 static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1702
1703 static int
1704 const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1705 unsigned HOST_WIDE_INT low_bits)
1706 {
1707 int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1708
1709 if (high_bits == 0
1710 || high_bits == 0xffffffff)
1711 return 1;
1712
1713 analyze_64bit_constant (high_bits, low_bits,
1714 &highest_bit_set, &lowest_bit_set,
1715 &all_bits_between_are_set);
1716
1717 if ((highest_bit_set == 63
1718 || lowest_bit_set == 0)
1719 && all_bits_between_are_set != 0)
1720 return 1;
1721
1722 if ((highest_bit_set - lowest_bit_set) < 21)
1723 return 1;
1724
1725 return 0;
1726 }
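
/* For instance (an illustrative example, not from the original code):
   0x0000003ff8000000 has lowest_bit_set == 27 and highest_bit_set == 37,
   so the span check (37 - 27) < 21 succeeds and the constant can be
   loaded with a sethi of the focused bits followed by one shift.
   Conversely, 0x0123456789abcdef fails every test above and needs a
   longer sequence.  */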
1727
1728 static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1729 unsigned HOST_WIDE_INT,
1730 int, int);
1731
1732 static unsigned HOST_WIDE_INT
1733 create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1734 unsigned HOST_WIDE_INT low_bits,
1735 int lowest_bit_set, int shift)
1736 {
1737 HOST_WIDE_INT hi, lo;
1738
1739 if (lowest_bit_set < 32)
1740 {
1741 lo = (low_bits >> lowest_bit_set) << shift;
1742 hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1743 }
1744 else
1745 {
1746 lo = 0;
1747 hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1748 }
1749 gcc_assert (! (hi & lo));
1750 return (hi | lo);
1751 }
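
/* Example (illustrative): with HIGH_BITS == 0, LOW_BITS == 0x0ff00000,
   LOWEST_BIT_SET == 20 and SHIFT == 10, the function returns
   (0x0ff00000 >> 20) << 10 == 0x3fc00, i.e. the set bits re-based so
   that they sit in the sethi-visible window (bits 10..31, with the low
   10 bits clear) and can be emitted with gen_safe_HIGH64 before being
   shifted into their final position.  */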
1752
1753 /* Here we are sure to be arch64 and this is an integer constant
1754 being loaded into a register. Emit the most efficient
1755 insn sequence possible. Detection of all the 1-insn cases
1756 has been done already. */
1757 static void
1758 sparc_emit_set_const64 (rtx op0, rtx op1)
1759 {
1760 unsigned HOST_WIDE_INT high_bits, low_bits;
1761 int lowest_bit_set, highest_bit_set;
1762 int all_bits_between_are_set;
1763 rtx temp = 0;
1764
1765 /* Sanity check that we know what we are working with. */
1766 gcc_assert (TARGET_ARCH64
1767 && (GET_CODE (op0) == SUBREG
1768 || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1769
1770 if (reload_in_progress || reload_completed)
1771 temp = op0;
1772
1773 if (GET_CODE (op1) != CONST_INT)
1774 {
1775 sparc_emit_set_symbolic_const64 (op0, op1, temp);
1776 return;
1777 }
1778
1779 if (! temp)
1780 temp = gen_reg_rtx (DImode);
1781
1782 high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1783 low_bits = (INTVAL (op1) & 0xffffffff);
1784
1785 /* low_bits bits 0 --> 31
1786 high_bits bits 32 --> 63 */
1787
1788 analyze_64bit_constant (high_bits, low_bits,
1789 &highest_bit_set, &lowest_bit_set,
1790 &all_bits_between_are_set);
1791
1792 /* First try for a 2-insn sequence. */
1793
1794 /* These situations are preferred because the optimizer can
1795 * do more things with them:
1796 * 1) mov -1, %reg
1797 * sllx %reg, shift, %reg
1798 * 2) mov -1, %reg
1799 * srlx %reg, shift, %reg
1800 * 3) mov some_small_const, %reg
1801 * sllx %reg, shift, %reg
1802 */
1803 if (((highest_bit_set == 63
1804 || lowest_bit_set == 0)
1805 && all_bits_between_are_set != 0)
1806 || ((highest_bit_set - lowest_bit_set) < 12))
1807 {
1808 HOST_WIDE_INT the_const = -1;
1809 int shift = lowest_bit_set;
1810
1811 if ((highest_bit_set != 63
1812 && lowest_bit_set != 0)
1813 || all_bits_between_are_set == 0)
1814 {
1815 the_const =
1816 create_simple_focus_bits (high_bits, low_bits,
1817 lowest_bit_set, 0);
1818 }
1819 else if (lowest_bit_set == 0)
1820 shift = -(63 - highest_bit_set);
1821
1822 gcc_assert (SPARC_SIMM13_P (the_const));
1823 gcc_assert (shift != 0);
1824
1825 emit_insn (gen_safe_SET64 (temp, the_const));
1826 if (shift > 0)
1827 emit_insn (gen_rtx_SET (VOIDmode,
1828 op0,
1829 gen_rtx_ASHIFT (DImode,
1830 temp,
1831 GEN_INT (shift))));
1832 else if (shift < 0)
1833 emit_insn (gen_rtx_SET (VOIDmode,
1834 op0,
1835 gen_rtx_LSHIFTRT (DImode,
1836 temp,
1837 GEN_INT (-shift))));
1838 return;
1839 }
1840
1841   /* Now a range of 22 or fewer bits set somewhere.
1842 * 1) sethi %hi(focus_bits), %reg
1843 * sllx %reg, shift, %reg
1844 * 2) sethi %hi(focus_bits), %reg
1845 * srlx %reg, shift, %reg
1846 */
1847 if ((highest_bit_set - lowest_bit_set) < 21)
1848 {
1849 unsigned HOST_WIDE_INT focus_bits =
1850 create_simple_focus_bits (high_bits, low_bits,
1851 lowest_bit_set, 10);
1852
1853 gcc_assert (SPARC_SETHI_P (focus_bits));
1854 gcc_assert (lowest_bit_set != 10);
1855
1856 emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1857
1858 /* If lowest_bit_set == 10 then a sethi alone could have done it. */
1859 if (lowest_bit_set < 10)
1860 emit_insn (gen_rtx_SET (VOIDmode,
1861 op0,
1862 gen_rtx_LSHIFTRT (DImode, temp,
1863 GEN_INT (10 - lowest_bit_set))));
1864 else if (lowest_bit_set > 10)
1865 emit_insn (gen_rtx_SET (VOIDmode,
1866 op0,
1867 gen_rtx_ASHIFT (DImode, temp,
1868 GEN_INT (lowest_bit_set - 10))));
1869 return;
1870 }
1871
1872 /* 1) sethi %hi(low_bits), %reg
1873 * or %reg, %lo(low_bits), %reg
1874 * 2) sethi %hi(~low_bits), %reg
1875 * xor %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1876 */
1877 if (high_bits == 0
1878 || high_bits == 0xffffffff)
1879 {
1880 sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1881 (high_bits == 0xffffffff));
1882 return;
1883 }
1884
1885 /* Now, try 3-insn sequences. */
1886
1887 /* 1) sethi %hi(high_bits), %reg
1888 * or %reg, %lo(high_bits), %reg
1889 * sllx %reg, 32, %reg
1890 */
1891 if (low_bits == 0)
1892 {
1893 sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1894 return;
1895 }
1896
1897 /* We may be able to do something quick
1898 when the constant is negated, so try that. */
1899 if (const64_is_2insns ((~high_bits) & 0xffffffff,
1900 (~low_bits) & 0xfffffc00))
1901 {
1902 /* NOTE: The trailing bits get XOR'd so we need the
1903 non-negated bits, not the negated ones. */
1904 unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1905
1906 if ((((~high_bits) & 0xffffffff) == 0
1907 && ((~low_bits) & 0x80000000) == 0)
1908 || (((~high_bits) & 0xffffffff) == 0xffffffff
1909 && ((~low_bits) & 0x80000000) != 0))
1910 {
1911 unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1912
1913 if ((SPARC_SETHI_P (fast_int)
1914 && (~high_bits & 0xffffffff) == 0)
1915 || SPARC_SIMM13_P (fast_int))
1916 emit_insn (gen_safe_SET64 (temp, fast_int));
1917 else
1918 sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1919 }
1920 else
1921 {
1922 rtx negated_const;
1923 negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1924 (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1925 sparc_emit_set_const64 (temp, negated_const);
1926 }
1927
1928 /* If we are XOR'ing with -1, then we should emit a one's complement
1929 instead. This way the combiner will notice logical operations
1930 such as ANDN later on and substitute. */
1931 if (trailing_bits == 0x3ff)
1932 {
1933 emit_insn (gen_rtx_SET (VOIDmode, op0,
1934 gen_rtx_NOT (DImode, temp)));
1935 }
1936 else
1937 {
1938 emit_insn (gen_rtx_SET (VOIDmode,
1939 op0,
1940 gen_safe_XOR64 (temp,
1941 (-0x400 | trailing_bits))));
1942 }
1943 return;
1944 }
1945
1946 /* 1) sethi %hi(xxx), %reg
1947 * or %reg, %lo(xxx), %reg
1948 * sllx %reg, yyy, %reg
1949 *
1950 * ??? This is just a generalized version of the low_bits==0
1951 * thing above, FIXME...
1952 */
1953 if ((highest_bit_set - lowest_bit_set) < 32)
1954 {
1955 unsigned HOST_WIDE_INT focus_bits =
1956 create_simple_focus_bits (high_bits, low_bits,
1957 lowest_bit_set, 0);
1958
1959 /* We can't get here in this state. */
1960 gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1961
1962 /* So what we know is that the set bits straddle the
1963 middle of the 64-bit word. */
1964 sparc_emit_set_const64_quick2 (op0, temp,
1965 focus_bits, 0,
1966 lowest_bit_set);
1967 return;
1968 }
1969
1970 /* 1) sethi %hi(high_bits), %reg
1971 * or %reg, %lo(high_bits), %reg
1972 * sllx %reg, 32, %reg
1973 * or %reg, low_bits, %reg
1974 */
1975 if (SPARC_SIMM13_P(low_bits)
1976 && ((int)low_bits > 0))
1977 {
1978 sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1979 return;
1980 }
1981
1982 /* The easiest way when all else fails, is full decomposition. */
1983 sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1984 }
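
/* To summarize the strategy above (a restatement, for illustration):
   the 1-insn cases were caught by the caller; then we try the 2-insn
   set+shift and sethi+shift forms, the sethi+or/xor forms for constants
   with trivial high parts, 3-insn sequences via low_bits == 0 or via
   the negated constant, the straddling focus-bits case, the 4-insn
   sethi+or+sllx+or form, and finally fall back to the full
   decomposition in sparc_emit_set_const64_longway.  */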
1985 #endif /* HOST_BITS_PER_WIDE_INT == 32 */
1986
1987 /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1988 return the mode to be used for the comparison. For floating-point,
1989 CCFP[E]mode is used. CC_NOOVmode should be used when the first operand
1990 is a PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
1991 processing is needed. */
1992
1993 enum machine_mode
1994 select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1995 {
1996 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1997 {
1998 switch (op)
1999 {
2000 case EQ:
2001 case NE:
2002 case UNORDERED:
2003 case ORDERED:
2004 case UNLT:
2005 case UNLE:
2006 case UNGT:
2007 case UNGE:
2008 case UNEQ:
2009 case LTGT:
2010 return CCFPmode;
2011
2012 case LT:
2013 case LE:
2014 case GT:
2015 case GE:
2016 return CCFPEmode;
2017
2018 default:
2019 gcc_unreachable ();
2020 }
2021 }
2022 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
2023 || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
2024 {
2025 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2026 return CCX_NOOVmode;
2027 else
2028 return CC_NOOVmode;
2029 }
2030 else
2031 {
2032 if (TARGET_ARCH64 && GET_MODE (x) == DImode)
2033 return CCXmode;
2034 else
2035 return CCmode;
2036 }
2037 }
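
/* For instance (illustrative): comparing (plus:SI a b) against zero
   yields CC_NOOVmode, meaning the overflow bit of the result cannot be
   relied upon; a plain (compare:DI a b) under TARGET_ARCH64 yields
   CCXmode; and an SFmode comparison with code UNLT yields CCFPmode
   while LT yields CCFPEmode, the signaling variant.  */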
2038
2039 /* Emit the compare insn and return the CC reg for a CODE comparison
2040 with operands X and Y. */
2041
2042 static rtx
2043 gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
2044 {
2045 enum machine_mode mode;
2046 rtx cc_reg;
2047
2048 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
2049 return x;
2050
2051 mode = SELECT_CC_MODE (code, x, y);
2052
2053 /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
2054 fcc regs (cse can't tell they're really call clobbered regs and will
2055 remove a duplicate comparison even if there is an intervening function
2056 call - it will then try to reload the cc reg via an int reg which is why
2057 we need the movcc patterns). It is possible to provide the movcc
2058 patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
2059 registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
2060 to tell cse that CCFPE mode registers (even pseudos) are call
2061 clobbered. */
2062
2063 /* ??? This is an experiment. Rather than making changes to cse which may
2064 or may not be easy/clean, we do our own cse. This is possible because
2065 we will generate hard registers. Cse knows they're call clobbered (it
2066 doesn't know the same thing about pseudos). If we guess wrong, no big
2067 deal, but if we win, great! */
2068
2069 if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2070 #if 1 /* experiment */
2071 {
2072 int reg;
2073 /* We cycle through the registers to ensure they're all exercised. */
2074 static int next_fcc_reg = 0;
2075 /* Previous x,y for each fcc reg. */
2076 static rtx prev_args[4][2];
2077
2078 /* Scan prev_args for x,y. */
2079 for (reg = 0; reg < 4; reg++)
2080 if (prev_args[reg][0] == x && prev_args[reg][1] == y)
2081 break;
2082 if (reg == 4)
2083 {
2084 reg = next_fcc_reg;
2085 prev_args[reg][0] = x;
2086 prev_args[reg][1] = y;
2087 next_fcc_reg = (next_fcc_reg + 1) & 3;
2088 }
2089 cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
2090 }
2091 #else
2092 cc_reg = gen_reg_rtx (mode);
2093 #endif /* ! experiment */
2094 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2095 cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2096 else
2097 cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2098
2099   /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
2100      will only result in an unrecognizable insn, so there is no point in
2101      asserting.  */
2101 emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));
2102
2103 return cc_reg;
2104 }
2105
2106
2107 /* Emit the compare insn and return the CC reg for the comparison in CMP. */
2108
2109 rtx
2110 gen_compare_reg (rtx cmp)
2111 {
2112 return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
2113 }
2114
2115 /* This function is used for v9 only.
2116 DEST is the target of the Scc insn.
2117 CODE is the code for an Scc's comparison.
2118 X and Y are the values we compare.
2119
2120 This function is needed to turn
2121
2122 (set (reg:SI 110)
2123 (gt (reg:CCX 100 %icc)
2124 (const_int 0)))
2125 into
2126 (set (reg:SI 110)
2127 (gt:DI (reg:CCX 100 %icc)
2128 (const_int 0)))
2129
2130    I.e., the instruction recognizer needs to see the mode of the comparison to
2131    find the right instruction.  We could use "gt:DI" right in the
2132    define_expand, but leaving it out allows us to handle DI, SI, etc.  */
2133
2134 static int
2135 gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
2136 {
2137 if (! TARGET_ARCH64
2138 && (GET_MODE (x) == DImode
2139 || GET_MODE (dest) == DImode))
2140 return 0;
2141
2142 /* Try to use the movrCC insns. */
2143 if (TARGET_ARCH64
2144 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
2145 && y == const0_rtx
2146 && v9_regcmp_p (compare_code))
2147 {
2148 rtx op0 = x;
2149 rtx temp;
2150
2151 /* Special case for op0 != 0. This can be done with one instruction if
2152 dest == x. */
2153
2154 if (compare_code == NE
2155 && GET_MODE (dest) == DImode
2156 && rtx_equal_p (op0, dest))
2157 {
2158 emit_insn (gen_rtx_SET (VOIDmode, dest,
2159 gen_rtx_IF_THEN_ELSE (DImode,
2160 gen_rtx_fmt_ee (compare_code, DImode,
2161 op0, const0_rtx),
2162 const1_rtx,
2163 dest)));
2164 return 1;
2165 }
2166
2167 if (reg_overlap_mentioned_p (dest, op0))
2168 {
2169 /* Handle the case where dest == x.
2170 We "early clobber" the result. */
2171 op0 = gen_reg_rtx (GET_MODE (x));
2172 emit_move_insn (op0, x);
2173 }
2174
2175 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2176 if (GET_MODE (op0) != DImode)
2177 {
2178 temp = gen_reg_rtx (DImode);
2179 convert_move (temp, op0, 0);
2180 }
2181 else
2182 temp = op0;
2183 emit_insn (gen_rtx_SET (VOIDmode, dest,
2184 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2185 gen_rtx_fmt_ee (compare_code, DImode,
2186 temp, const0_rtx),
2187 const1_rtx,
2188 dest)));
2189 return 1;
2190 }
2191 else
2192 {
2193 x = gen_compare_reg_1 (compare_code, x, y);
2194 y = const0_rtx;
2195
2196 gcc_assert (GET_MODE (x) != CC_NOOVmode
2197 && GET_MODE (x) != CCX_NOOVmode);
2198
2199 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
2200 emit_insn (gen_rtx_SET (VOIDmode, dest,
2201 gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
2202 gen_rtx_fmt_ee (compare_code,
2203 GET_MODE (x), x, y),
2204 const1_rtx, dest)));
2205 return 1;
2206 }
2207 }
2208
2209
2210 /* Emit an scc insn. For seq, sne, sgeu, and sltu, we can do this
2211 without jumps using the addx/subx instructions. */
2212
2213 bool
2214 emit_scc_insn (rtx operands[])
2215 {
2216 rtx tem;
2217 rtx x;
2218 rtx y;
2219 enum rtx_code code;
2220
2221 /* The quad-word fp compare library routines all return nonzero to indicate
2222 true, which is different from the equivalent libgcc routines, so we must
2223 handle them specially here. */
2224 if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
2225 {
2226 operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
2227 GET_CODE (operands[1]));
2228 operands[2] = XEXP (operands[1], 0);
2229 operands[3] = XEXP (operands[1], 1);
2230 }
2231
2232 code = GET_CODE (operands[1]);
2233 x = operands[2];
2234 y = operands[3];
2235
2236 /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
2237 more applications). The exception to this is "reg != 0" which can
2238 be done in one instruction on v9 (so we do it). */
2239 if (code == EQ)
2240 {
2241 if (GET_MODE (x) == SImode)
2242 {
2243 rtx pat = gen_seqsi_special (operands[0], x, y);
2244 emit_insn (pat);
2245 return true;
2246 }
2247 else if (GET_MODE (x) == DImode)
2248 {
2249 rtx pat = gen_seqdi_special (operands[0], x, y);
2250 emit_insn (pat);
2251 return true;
2252 }
2253 }
2254
2255 if (code == NE)
2256 {
2257 if (GET_MODE (x) == SImode)
2258 {
2259 rtx pat = gen_snesi_special (operands[0], x, y);
2260 emit_insn (pat);
2261 return true;
2262 }
2263 else if (GET_MODE (x) == DImode)
2264 {
2265 rtx pat = gen_snedi_special (operands[0], x, y);
2266 emit_insn (pat);
2267 return true;
2268 }
2269 }
2270
2271 /* For the rest, on v9 we can use conditional moves. */
2272
2273 if (TARGET_V9)
2274 {
2275 if (gen_v9_scc (operands[0], code, x, y))
2276 return true;
2277 }
2278
2279 /* We can do LTU and GEU using the addx/subx instructions too. And
2280 for GTU/LEU, if both operands are registers swap them and fall
2281 back to the easy case. */
2282 if (code == GTU || code == LEU)
2283 {
2284 if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2285 && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
2286 {
2287 tem = x;
2288 x = y;
2289 y = tem;
2290 code = swap_condition (code);
2291 }
2292 }
2293
2294 if (code == LTU || code == GEU)
2295 {
2296 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2297 gen_rtx_fmt_ee (code, SImode,
2298 gen_compare_reg_1 (code, x, y),
2299 const0_rtx)));
2300 return true;
2301 }
2302
2303 /* Nope, do branches. */
2304 return false;
2305 }
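
/* As an illustration of the LTU/GEU case above (an example, not from the
   original sources): on V8, "x < y" (unsigned) can be computed without a
   branch as

	subcc	%x, %y, %g0	! sets the carry bit iff x < y unsigned
	addx	%g0, 0, %dest	! dest = 0 + 0 + carry

   and "x >= y" analogously with subx; these are the addx/subx patterns
   matched by the SET emitted above.  */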
2306
2307 /* Emit a conditional jump insn for the v9 architecture using comparison code
2308 CODE and jump target LABEL.
2309 This function exists to take advantage of the v9 brxx insns. */
2310
2311 static void
2312 emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2313 {
2314 emit_jump_insn (gen_rtx_SET (VOIDmode,
2315 pc_rtx,
2316 gen_rtx_IF_THEN_ELSE (VOIDmode,
2317 gen_rtx_fmt_ee (code, GET_MODE (op0),
2318 op0, const0_rtx),
2319 gen_rtx_LABEL_REF (VOIDmode, label),
2320 pc_rtx)));
2321 }
2322
2323 void
2324 emit_conditional_branch_insn (rtx operands[])
2325 {
2326 /* The quad-word fp compare library routines all return nonzero to indicate
2327 true, which is different from the equivalent libgcc routines, so we must
2328 handle them specially here. */
2329 if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
2330 {
2331 operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
2332 GET_CODE (operands[0]));
2333 operands[1] = XEXP (operands[0], 0);
2334 operands[2] = XEXP (operands[0], 1);
2335 }
2336
2337 if (TARGET_ARCH64 && operands[2] == const0_rtx
2338 && GET_CODE (operands[1]) == REG
2339 && GET_MODE (operands[1]) == DImode)
2340 {
2341 emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
2342 return;
2343 }
2344
2345 operands[1] = gen_compare_reg (operands[0]);
2346 operands[2] = const0_rtx;
2347 operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
2348 operands[1], operands[2]);
2349 emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
2350 operands[3]));
2351 }
2352
2353
2354 /* Generate a DFmode part of a hard TFmode register.
2355 REG is the TFmode hard register, LOW is 1 for the
2356    low 64 bits of the register and 0 otherwise.
2357 */
2358 rtx
2359 gen_df_reg (rtx reg, int low)
2360 {
2361 int regno = REGNO (reg);
2362
2363 if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2364 regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2365 return gen_rtx_REG (DFmode, regno);
2366 }
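
/* Example (illustrative): with WORDS_BIG_ENDIAN set, as on SPARC, a
   TFmode value living in %f4..%f7 yields %f4 for the high DFmode half
   (LOW == 0) and %f6 for the low half (LOW == 1); TFmode values in the
   64-bit integer registers under TARGET_ARCH64 step by 1 register
   instead of 2.  */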
2367 \f
2368 /* Generate a call to FUNC with OPERANDS. Operand 0 is the return value.
2369 Unlike normal calls, TFmode operands are passed by reference. It is
2370 assumed that no more than 3 operands are required. */
2371
2372 static void
2373 emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2374 {
2375 rtx ret_slot = NULL, arg[3], func_sym;
2376 int i;
2377
2378 /* We only expect to be called for conversions, unary, and binary ops. */
2379 gcc_assert (nargs == 2 || nargs == 3);
2380
2381 for (i = 0; i < nargs; ++i)
2382 {
2383 rtx this_arg = operands[i];
2384 rtx this_slot;
2385
2386 /* TFmode arguments and return values are passed by reference. */
2387 if (GET_MODE (this_arg) == TFmode)
2388 {
2389 int force_stack_temp;
2390
2391 force_stack_temp = 0;
2392 if (TARGET_BUGGY_QP_LIB && i == 0)
2393 force_stack_temp = 1;
2394
2395 if (GET_CODE (this_arg) == MEM
2396 && ! force_stack_temp)
2397 this_arg = XEXP (this_arg, 0);
2398 else if (CONSTANT_P (this_arg)
2399 && ! force_stack_temp)
2400 {
2401 this_slot = force_const_mem (TFmode, this_arg);
2402 this_arg = XEXP (this_slot, 0);
2403 }
2404 else
2405 {
2406 this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2407
2408 /* Operand 0 is the return value. We'll copy it out later. */
2409 if (i > 0)
2410 emit_move_insn (this_slot, this_arg);
2411 else
2412 ret_slot = this_slot;
2413
2414 this_arg = XEXP (this_slot, 0);
2415 }
2416 }
2417
2418 arg[i] = this_arg;
2419 }
2420
2421 func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2422
2423 if (GET_MODE (operands[0]) == TFmode)
2424 {
2425 if (nargs == 2)
2426 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2427 arg[0], GET_MODE (arg[0]),
2428 arg[1], GET_MODE (arg[1]));
2429 else
2430 emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2431 arg[0], GET_MODE (arg[0]),
2432 arg[1], GET_MODE (arg[1]),
2433 arg[2], GET_MODE (arg[2]));
2434
2435 if (ret_slot)
2436 emit_move_insn (operands[0], ret_slot);
2437 }
2438 else
2439 {
2440 rtx ret;
2441
2442 gcc_assert (nargs == 2);
2443
2444 ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2445 GET_MODE (operands[0]), 1,
2446 arg[1], GET_MODE (arg[1]));
2447
2448 if (ret != operands[0])
2449 emit_move_insn (operands[0], ret);
2450 }
2451 }
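
/* For reference, the callee side of this convention (per the SPARC V9
   ABI; shown here only for illustration) looks like

	void _Qp_add (long double *res, const long double *x,
		      const long double *y);

   i.e. the TFmode result and all TFmode operands are passed by
   reference, which is why the loop above replaces each TFmode operand
   with the address of a MEM, a constant-pool slot or a stack slot.  */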
2452
2453 /* Expand soft-float TFmode calls to sparc abi routines. */
2454
2455 static void
2456 emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2457 {
2458 const char *func;
2459
2460 switch (code)
2461 {
2462 case PLUS:
2463 func = "_Qp_add";
2464 break;
2465 case MINUS:
2466 func = "_Qp_sub";
2467 break;
2468 case MULT:
2469 func = "_Qp_mul";
2470 break;
2471 case DIV:
2472 func = "_Qp_div";
2473 break;
2474 default:
2475 gcc_unreachable ();
2476 }
2477
2478 emit_soft_tfmode_libcall (func, 3, operands);
2479 }
2480
2481 static void
2482 emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2483 {
2484 const char *func;
2485
2486 gcc_assert (code == SQRT);
2487 func = "_Qp_sqrt";
2488
2489 emit_soft_tfmode_libcall (func, 2, operands);
2490 }
2491
2492 static void
2493 emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2494 {
2495 const char *func;
2496
2497 switch (code)
2498 {
2499 case FLOAT_EXTEND:
2500 switch (GET_MODE (operands[1]))
2501 {
2502 case SFmode:
2503 func = "_Qp_stoq";
2504 break;
2505 case DFmode:
2506 func = "_Qp_dtoq";
2507 break;
2508 default:
2509 gcc_unreachable ();
2510 }
2511 break;
2512
2513 case FLOAT_TRUNCATE:
2514 switch (GET_MODE (operands[0]))
2515 {
2516 case SFmode:
2517 func = "_Qp_qtos";
2518 break;
2519 case DFmode:
2520 func = "_Qp_qtod";
2521 break;
2522 default:
2523 gcc_unreachable ();
2524 }
2525 break;
2526
2527 case FLOAT:
2528 switch (GET_MODE (operands[1]))
2529 {
2530 case SImode:
2531 func = "_Qp_itoq";
2532 if (TARGET_ARCH64)
2533 operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
2534 break;
2535 case DImode:
2536 func = "_Qp_xtoq";
2537 break;
2538 default:
2539 gcc_unreachable ();
2540 }
2541 break;
2542
2543 case UNSIGNED_FLOAT:
2544 switch (GET_MODE (operands[1]))
2545 {
2546 case SImode:
2547 func = "_Qp_uitoq";
2548 if (TARGET_ARCH64)
2549 operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
2550 break;
2551 case DImode:
2552 func = "_Qp_uxtoq";
2553 break;
2554 default:
2555 gcc_unreachable ();
2556 }
2557 break;
2558
2559 case FIX:
2560 switch (GET_MODE (operands[0]))
2561 {
2562 case SImode:
2563 func = "_Qp_qtoi";
2564 break;
2565 case DImode:
2566 func = "_Qp_qtox";
2567 break;
2568 default:
2569 gcc_unreachable ();
2570 }
2571 break;
2572
2573 case UNSIGNED_FIX:
2574 switch (GET_MODE (operands[0]))
2575 {
2576 case SImode:
2577 func = "_Qp_qtoui";
2578 break;
2579 case DImode:
2580 func = "_Qp_qtoux";
2581 break;
2582 default:
2583 gcc_unreachable ();
2584 }
2585 break;
2586
2587 default:
2588 gcc_unreachable ();
2589 }
2590
2591 emit_soft_tfmode_libcall (func, 2, operands);
2592 }
2593
2594 /* Expand a hard-float tfmode operation. All arguments must be in
2595 registers. */
2596
2597 static void
2598 emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2599 {
2600 rtx op, dest;
2601
2602 if (GET_RTX_CLASS (code) == RTX_UNARY)
2603 {
2604 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2605 op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2606 }
2607 else
2608 {
2609 operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2610 operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2611 op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2612 operands[1], operands[2]);
2613 }
2614
2615 if (register_operand (operands[0], VOIDmode))
2616 dest = operands[0];
2617 else
2618 dest = gen_reg_rtx (GET_MODE (operands[0]));
2619
2620 emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2621
2622 if (dest != operands[0])
2623 emit_move_insn (operands[0], dest);
2624 }
2625
2626 void
2627 emit_tfmode_binop (enum rtx_code code, rtx *operands)
2628 {
2629 if (TARGET_HARD_QUAD)
2630 emit_hard_tfmode_operation (code, operands);
2631 else
2632 emit_soft_tfmode_binop (code, operands);
2633 }
2634
2635 void
2636 emit_tfmode_unop (enum rtx_code code, rtx *operands)
2637 {
2638 if (TARGET_HARD_QUAD)
2639 emit_hard_tfmode_operation (code, operands);
2640 else
2641 emit_soft_tfmode_unop (code, operands);
2642 }
2643
2644 void
2645 emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2646 {
2647 if (TARGET_HARD_QUAD)
2648 emit_hard_tfmode_operation (code, operands);
2649 else
2650 emit_soft_tfmode_cvt (code, operands);
2651 }
2652 \f
2653 /* Return nonzero if a branch/jump/call instruction will be emitting
2654    a nop into its delay slot.  */
2655
2656 int
2657 empty_delay_slot (rtx insn)
2658 {
2659 rtx seq;
2660
2661 /* If no previous instruction (should not happen), return true. */
2662 if (PREV_INSN (insn) == NULL)
2663 return 1;
2664
2665 seq = NEXT_INSN (PREV_INSN (insn));
2666 if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2667 return 0;
2668
2669 return 1;
2670 }
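
/* (Roughly: when the delay slot scheduler fills a slot, it wraps the
   branch and its delay insn together in a SEQUENCE, so if INSN sits in
   a filled slot, NEXT_INSN (PREV_INSN (insn)) is the enclosing SEQUENCE
   insn; that is what the test above detects.)  */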
2671
2672 /* Return nonzero if TRIAL can go into the call delay slot. */
2673
2674 int
2675 tls_call_delay (rtx trial)
2676 {
2677 rtx pat;
2678
2679 /* Binutils allows
2680 call __tls_get_addr, %tgd_call (foo)
2681 add %l7, %o0, %o0, %tgd_add (foo)
2682 while Sun as/ld does not. */
2683 if (TARGET_GNU_TLS || !TARGET_TLS)
2684 return 1;
2685
2686 pat = PATTERN (trial);
2687
2688 /* We must reject tgd_add{32|64}, i.e.
2689 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2690 and tldm_add{32|64}, i.e.
2691 (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2692 for Sun as/ld. */
2693 if (GET_CODE (pat) == SET
2694 && GET_CODE (SET_SRC (pat)) == PLUS)
2695 {
2696 rtx unspec = XEXP (SET_SRC (pat), 1);
2697
2698 if (GET_CODE (unspec) == UNSPEC
2699 && (XINT (unspec, 1) == UNSPEC_TLSGD
2700 || XINT (unspec, 1) == UNSPEC_TLSLDM))
2701 return 0;
2702 }
2703
2704 return 1;
2705 }
2706
2707 /* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2708 instruction. RETURN_P is true if the v9 variant 'return' is to be
2709 considered in the test too.
2710
2711 TRIAL must be a SET whose destination is a REG appropriate for the
2712 'restore' instruction or, if RETURN_P is true, for the 'return'
2713 instruction. */
2714
2715 static int
2716 eligible_for_restore_insn (rtx trial, bool return_p)
2717 {
2718 rtx pat = PATTERN (trial);
2719 rtx src = SET_SRC (pat);
2720
2721 /* The 'restore src,%g0,dest' pattern for word mode and below. */
2722 if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2723 && arith_operand (src, GET_MODE (src)))
2724 {
2725 if (TARGET_ARCH64)
2726 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2727 else
2728 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2729 }
2730
2731 /* The 'restore src,%g0,dest' pattern for double-word mode. */
2732 else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2733 && arith_double_operand (src, GET_MODE (src)))
2734 return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2735
2736 /* The 'restore src,%g0,dest' pattern for float if no FPU. */
2737 else if (! TARGET_FPU && register_operand (src, SFmode))
2738 return 1;
2739
2740 /* The 'restore src,%g0,dest' pattern for double if no FPU. */
2741 else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2742 return 1;
2743
2744 /* If we have the 'return' instruction, anything that does not use
2745 local or output registers and can go into a delay slot wins. */
2746 else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2747 && (get_attr_in_uncond_branch_delay (trial)
2748 == IN_UNCOND_BRANCH_DELAY_TRUE))
2749 return 1;
2750
2751 /* The 'restore src1,src2,dest' pattern for SImode. */
2752 else if (GET_CODE (src) == PLUS
2753 && register_operand (XEXP (src, 0), SImode)
2754 && arith_operand (XEXP (src, 1), SImode))
2755 return 1;
2756
2757 /* The 'restore src1,src2,dest' pattern for DImode. */
2758 else if (GET_CODE (src) == PLUS
2759 && register_operand (XEXP (src, 0), DImode)
2760 && arith_double_operand (XEXP (src, 1), DImode))
2761 return 1;
2762
2763 /* The 'restore src1,%lo(src2),dest' pattern. */
2764 else if (GET_CODE (src) == LO_SUM
2765 && ! TARGET_CM_MEDMID
2766 && ((register_operand (XEXP (src, 0), SImode)
2767 && immediate_operand (XEXP (src, 1), SImode))
2768 || (TARGET_ARCH64
2769 && register_operand (XEXP (src, 0), DImode)
2770 && immediate_operand (XEXP (src, 1), DImode))))
2771 return 1;
2772
2773 /* The 'restore src,src,dest' pattern. */
2774 else if (GET_CODE (src) == ASHIFT
2775 && (register_operand (XEXP (src, 0), SImode)
2776 || register_operand (XEXP (src, 0), DImode))
2777 && XEXP (src, 1) == const1_rtx)
2778 return 1;
2779
2780 return 0;
2781 }
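
/* A typical beneficiary (illustrative example): the epilogue of a
   function that returns the result of its last call can fold the move
   into the restore, e.g.

	ret
	restore	%g0, %o0, %o0	! caller's %o0 = callee's %o0 + 0

   because the source operands of "restore" are read in the current
   window while the destination is written in the caller's window.  */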
2782
2783 /* Return nonzero if TRIAL can go into the function return's
2784 delay slot. */
2785
2786 int
2787 eligible_for_return_delay (rtx trial)
2788 {
2789 rtx pat;
2790
2791 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2792 return 0;
2793
2794 if (get_attr_length (trial) != 1)
2795 return 0;
2796
2797   /* If there are any call-saved registers, we would need to scan TRIAL to
2798      make sure it does not reference any of them.  For now, just reject it.  */
2799 if (num_gfregs)
2800 return 0;
2801
2802 /* If the function uses __builtin_eh_return, the eh_return machinery
2803 occupies the delay slot. */
2804 if (crtl->calls_eh_return)
2805 return 0;
2806
2807 /* In the case of a true leaf function, anything can go into the slot. */
2808 if (sparc_leaf_function_p)
2809 return get_attr_in_uncond_branch_delay (trial)
2810 == IN_UNCOND_BRANCH_DELAY_TRUE;
2811
2812 pat = PATTERN (trial);
2813
2814 /* Otherwise, only operations which can be done in tandem with
2815 a `restore' or `return' insn can go into the delay slot. */
2816 if (GET_CODE (SET_DEST (pat)) != REG
2817 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2818 return 0;
2819
2820   /* If this instruction sets up a floating-point register and we have a
2821      return instruction, it can probably go in.  But restore will not work
2822      with FP_REGS.  */
2823 if (REGNO (SET_DEST (pat)) >= 32)
2824 return (TARGET_V9
2825 && ! epilogue_renumber (&pat, 1)
2826 && (get_attr_in_uncond_branch_delay (trial)
2827 == IN_UNCOND_BRANCH_DELAY_TRUE));
2828
2829 return eligible_for_restore_insn (trial, true);
2830 }
2831
2832 /* Return nonzero if TRIAL can go into the sibling call's
2833 delay slot. */
2834
2835 int
2836 eligible_for_sibcall_delay (rtx trial)
2837 {
2838 rtx pat;
2839
2840 if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2841 return 0;
2842
2843 if (get_attr_length (trial) != 1)
2844 return 0;
2845
2846 pat = PATTERN (trial);
2847
2848 if (sparc_leaf_function_p)
2849 {
2850 /* If the tail call is done using the call instruction,
2851 we have to restore %o7 in the delay slot. */
2852 if (LEAF_SIBCALL_SLOT_RESERVED_P)
2853 return 0;
2854
2855       /* %g1 is used to build the function address.  */
2856 if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2857 return 0;
2858
2859 return 1;
2860 }
2861
2862 /* Otherwise, only operations which can be done in tandem with
2863 a `restore' insn can go into the delay slot. */
2864 if (GET_CODE (SET_DEST (pat)) != REG
2865 || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2866 || REGNO (SET_DEST (pat)) >= 32)
2867 return 0;
2868
2869 /* If it mentions %o7, it can't go in, because sibcall will clobber it
2870 in most cases. */
2871 if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2872 return 0;
2873
2874 return eligible_for_restore_insn (trial, false);
2875 }
2876
2877 int
2878 short_branch (int uid1, int uid2)
2879 {
2880 int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2881
2882 /* Leave a few words of "slop". */
2883 if (delta >= -1023 && delta <= 1022)
2884 return 1;
2885
2886 return 0;
2887 }
2888
2889 /* Return nonzero if REG is not used after INSN.
2890 We assume REG is a reload reg, and therefore does
2891 not live past labels or calls or jumps. */
2892 int
2893 reg_unused_after (rtx reg, rtx insn)
2894 {
2895 enum rtx_code code, prev_code = UNKNOWN;
2896
2897 while ((insn = NEXT_INSN (insn)))
2898 {
2899 if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2900 return 1;
2901
2902       code = GET_CODE (insn);
2903       if (code == CODE_LABEL)
2904 return 1;
2905
2906 if (INSN_P (insn))
2907 {
2908 rtx set = single_set (insn);
2909 	  rtx set = single_set (insn);
2910 	  int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2911 	  if (in_src)
2911 return 0;
2912 if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2913 return 1;
2914 if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2915 return 0;
2916 }
2917 prev_code = code;
2918 }
2919 return 1;
2920 }
2921 \f
2922 /* Determine if it's legal to put X into the constant pool. This
2923 is not possible if X contains the address of a symbol that is
2924 not constant (TLS) or not known at final link time (PIC). */
2925
2926 static bool
2927 sparc_cannot_force_const_mem (rtx x)
2928 {
2929 switch (GET_CODE (x))
2930 {
2931 case CONST_INT:
2932 case CONST_DOUBLE:
2933 case CONST_VECTOR:
2934 /* Accept all non-symbolic constants. */
2935 return false;
2936
2937 case LABEL_REF:
2938 /* Labels are OK iff we are non-PIC. */
2939 return flag_pic != 0;
2940
2941 case SYMBOL_REF:
2942 /* 'Naked' TLS symbol references are never OK,
2943 non-TLS symbols are OK iff we are non-PIC. */
2944 if (SYMBOL_REF_TLS_MODEL (x))
2945 return true;
2946 else
2947 return flag_pic != 0;
2948
2949 case CONST:
2950 return sparc_cannot_force_const_mem (XEXP (x, 0));
2951 case PLUS:
2952 case MINUS:
2953 return sparc_cannot_force_const_mem (XEXP (x, 0))
2954 || sparc_cannot_force_const_mem (XEXP (x, 1));
2955 case UNSPEC:
2956 return true;
2957 default:
2958 gcc_unreachable ();
2959 }
2960 }
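
/* Example (illustrative): under -fPIC, (symbol_ref "foo") must not be
   forced into the constant pool, because its final address is only
   known at link time; and a TLS symbol must never go there, PIC or not,
   since its "address" is really a thread-local offset.  */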
2961 \f
2962 /* Global Offset Table support. */
2963 static GTY(()) rtx got_helper_rtx = NULL_RTX;
2964 static GTY(()) rtx global_offset_table_rtx = NULL_RTX;
2965
2966 /* Return the SYMBOL_REF for the Global Offset Table. */
2967
2968 static GTY(()) rtx sparc_got_symbol = NULL_RTX;
2969
2970 static rtx
2971 sparc_got (void)
2972 {
2973 if (!sparc_got_symbol)
2974 sparc_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
2975
2976 return sparc_got_symbol;
2977 }
2978
2979 /* Ensure that we are not using patterns that are not OK with PIC. */
2980
2981 int
2982 check_pic (int i)
2983 {
2984 rtx op;
2985
2986 switch (flag_pic)
2987 {
2988 case 1:
2989 op = recog_data.operand[i];
2990 gcc_assert (GET_CODE (op) != SYMBOL_REF
2991 && (GET_CODE (op) != CONST
2992 || (GET_CODE (XEXP (op, 0)) == MINUS
2993 && XEXP (XEXP (op, 0), 0) == sparc_got ()
2994 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)));
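      /* Fall through.  */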
2995 case 2:
2996 default:
2997 return 1;
2998 }
2999 }
3000
3001 /* Return true if X is an address which needs a temporary register when
3002 reloaded while generating PIC code. */
3003
3004 int
3005 pic_address_needs_scratch (rtx x)
3006 {
3007   /* An address that is a symbolic operand plus a non-SMALL_INT constant needs a temp reg.  */
3008 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
3009 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
3010 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3011 && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
3012 return 1;
3013
3014 return 0;
3015 }
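
/* Example (illustrative): (const (plus (symbol_ref "x") (const_int 8)))
   is fine, since 8 fits in the signed 13-bit immediate field, whereas
   (const (plus (symbol_ref "x") (const_int 0x2000))) fails SMALL_INT
   and therefore needs a scratch register when reloaded under PIC.  */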
3016
3017 /* Determine if a given RTX is a valid constant. We already know this
3018 satisfies CONSTANT_P. */
3019
3020 bool
3021 legitimate_constant_p (rtx x)
3022 {
3023 switch (GET_CODE (x))
3024 {
3025 case CONST:
3026 case SYMBOL_REF:
3027 if (sparc_tls_referenced_p (x))
3028 return false;
3029 break;
3030
3031 case CONST_DOUBLE:
3032 if (GET_MODE (x) == VOIDmode)
3033 return true;
3034
3035 /* Floating point constants are generally not ok.
3036 The only exception is 0.0 in VIS. */
3037 if (TARGET_VIS
3038 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3039 && const_zero_operand (x, GET_MODE (x)))
3040 return true;
3041
3042 return false;
3043
3044 case CONST_VECTOR:
3045 /* Vector constants are generally not ok.
3046 The only exception is 0 in VIS. */
3047 if (TARGET_VIS
3048 && const_zero_operand (x, GET_MODE (x)))
3049 return true;
3050
3051 return false;
3052
3053 default:
3054 break;
3055 }
3056
3057 return true;
3058 }
3059
3060 /* Determine if a given RTX is a valid constant address. */
3061
3062 bool
3063 constant_address_p (rtx x)
3064 {
3065 switch (GET_CODE (x))
3066 {
3067 case LABEL_REF:
3068 case CONST_INT:
3069 case HIGH:
3070 return true;
3071
3072 case CONST:
3073 if (flag_pic && pic_address_needs_scratch (x))
3074 return false;
3075 return legitimate_constant_p (x);
3076
3077 case SYMBOL_REF:
3078 return !flag_pic && legitimate_constant_p (x);
3079
3080 default:
3081 return false;
3082 }
3083 }
3084
3085 /* Nonzero if the constant value X is a legitimate general operand
3086 when generating PIC code. It is given that flag_pic is on and
3087 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
3088
3089 bool
3090 legitimate_pic_operand_p (rtx x)
3091 {
3092 if (pic_address_needs_scratch (x))
3093 return false;
3094 if (sparc_tls_referenced_p (x))
3095 return false;
3096 return true;
3097 }
3098
3099 /* Return nonzero if ADDR is a valid memory address.
3100 STRICT specifies whether strict register checking applies. */
3101
3102 static bool
3103 sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
3104 {
3105 rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
3106
3107 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
3108 rs1 = addr;
3109 else if (GET_CODE (addr) == PLUS)
3110 {
3111 rs1 = XEXP (addr, 0);
3112 rs2 = XEXP (addr, 1);
3113
3114       /* Canonicalize.  REG comes first; if there are no regs,
3115 	 LO_SUM comes first.  */
3116 if (!REG_P (rs1)
3117 && GET_CODE (rs1) != SUBREG
3118 && (REG_P (rs2)
3119 || GET_CODE (rs2) == SUBREG
3120 || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
3121 {
3122 rs1 = XEXP (addr, 1);
3123 rs2 = XEXP (addr, 0);
3124 }
3125
3126 if ((flag_pic == 1
3127 && rs1 == pic_offset_table_rtx
3128 && !REG_P (rs2)
3129 && GET_CODE (rs2) != SUBREG
3130 && GET_CODE (rs2) != LO_SUM
3131 && GET_CODE (rs2) != MEM
3132 && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
3133 && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
3134 && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
3135 || ((REG_P (rs1)
3136 || GET_CODE (rs1) == SUBREG)
3137 && RTX_OK_FOR_OFFSET_P (rs2)))
3138 {
3139 imm1 = rs2;
3140 rs2 = NULL;
3141 }
3142 else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
3143 && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
3144 {
3145 /* We prohibit REG + REG for TFmode when there are no quad move insns
3146 and we consequently need to split. We do this because REG+REG
3147 is not an offsettable address. If we get the situation in reload
3148 where source and destination of a movtf pattern are both MEMs with
3149 REG+REG address, then only one of them gets converted to an
3150 offsettable address. */
3151 if (mode == TFmode
3152 && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
3153 return 0;
3154
3155 	  /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
3156 	     optimizing, because then mem_min_alignment is likely to be zero
3157 	     after reload and the forced split would lack a matching splitter
3158 	     pattern.  */
3159 if (TARGET_ARCH32 && !optimize
3160 && (mode == DFmode || mode == DImode))
3161 return 0;
3162 }
3163 else if (USE_AS_OFFSETABLE_LO10
3164 && GET_CODE (rs1) == LO_SUM
3165 && TARGET_ARCH64
3166 && ! TARGET_CM_MEDMID
3167 && RTX_OK_FOR_OLO10_P (rs2))
3168 {
3169 rs2 = NULL;
3170 imm1 = XEXP (rs1, 1);
3171 rs1 = XEXP (rs1, 0);
3172 if (!CONSTANT_P (imm1)
3173 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3174 return 0;
3175 }
3176 }
3177 else if (GET_CODE (addr) == LO_SUM)
3178 {
3179 rs1 = XEXP (addr, 0);
3180 imm1 = XEXP (addr, 1);
3181
3182 if (!CONSTANT_P (imm1)
3183 || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
3184 return 0;
3185
3186 /* We can't allow TFmode in 32-bit mode, because an offset greater
3187 than the alignment (8) may cause the LO_SUM to overflow. */
3188 if (mode == TFmode && TARGET_ARCH32)
3189 return 0;
3190 }
3191 else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
3192 return 1;
3193 else
3194 return 0;
3195
3196 if (GET_CODE (rs1) == SUBREG)
3197 rs1 = SUBREG_REG (rs1);
3198 if (!REG_P (rs1))
3199 return 0;
3200
3201 if (rs2)
3202 {
3203 if (GET_CODE (rs2) == SUBREG)
3204 rs2 = SUBREG_REG (rs2);
3205 if (!REG_P (rs2))
3206 return 0;
3207 }
3208
3209 if (strict)
3210 {
3211 if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
3212 || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
3213 return 0;
3214 }
3215 else
3216 {
3217 if ((REGNO (rs1) >= 32
3218 && REGNO (rs1) != FRAME_POINTER_REGNUM
3219 && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
3220 || (rs2
3221 && (REGNO (rs2) >= 32
3222 && REGNO (rs2) != FRAME_POINTER_REGNUM
3223 && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3224 return 0;
3225 }
3226 return 1;
3227 }
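
/* In other words (a summary of the checks above, for illustration), the
   accepted forms are: REG; REG + REG (modulo the TFmode and ARCH32
   restrictions noted above); REG + simm13; the PIC register plus a
   symbol under flag_pic == 1; LO_SUM (REG, imm) for non-TLS immediates;
   a LO_SUM inside a PLUS with an olo10 offset on ARCH64; and a bare
   SMALL_INT absolute address.  */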
3228
3229 /* Return the SYMBOL_REF for the tls_get_addr function. */
3230
3231 static GTY(()) rtx sparc_tls_symbol = NULL_RTX;
3232
3233 static rtx
3234 sparc_tls_get_addr (void)
3235 {
3236 if (!sparc_tls_symbol)
3237 sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3238
3239 return sparc_tls_symbol;
3240 }
3241
3242 /* Return the Global Offset Table to be used in TLS mode. */
3243
3244 static rtx
3245 sparc_tls_got (void)
3246 {
3247 /* In PIC mode, this is just the PIC offset table. */
3248 if (flag_pic)
3249 {
3250 crtl->uses_pic_offset_table = 1;
3251 return pic_offset_table_rtx;
3252 }
3253
3254 /* In non-PIC mode, Sun as (unlike GNU as) emits PC-relative relocations for
3255 the GOT symbol with the 32-bit ABI, so we reload the GOT register. */
3256 if (TARGET_SUN_TLS && TARGET_ARCH32)
3257 {
3258 load_got_register ();
3259 return global_offset_table_rtx;
3260 }
3261
3262 /* In all other cases, we load a new pseudo with the GOT symbol. */
3263 return copy_to_reg (sparc_got ());
3264 }
3265
3266 /* Return true if X contains a thread-local symbol. */
3267
3268 static bool
3269 sparc_tls_referenced_p (rtx x)
3270 {
3271 if (!TARGET_HAVE_TLS)
3272 return false;
3273
3274 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3275 x = XEXP (XEXP (x, 0), 0);
3276
3277 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
3278 return true;
3279
3280 /* That's all we handle in sparc_legitimize_tls_address for now. */
3281 return false;
3282 }
3283
3284 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
3285 this (thread-local) address. */
3286
3287 static rtx
3288 sparc_legitimize_tls_address (rtx addr)
3289 {
3290 rtx temp1, temp2, temp3, ret, o0, got, insn;
3291
3292 gcc_assert (can_create_pseudo_p ());
3293
3294 if (GET_CODE (addr) == SYMBOL_REF)
3295 switch (SYMBOL_REF_TLS_MODEL (addr))
3296 {
3297 case TLS_MODEL_GLOBAL_DYNAMIC:
3298 start_sequence ();
3299 temp1 = gen_reg_rtx (SImode);
3300 temp2 = gen_reg_rtx (SImode);
3301 ret = gen_reg_rtx (Pmode);
3302 o0 = gen_rtx_REG (Pmode, 8);
3303 got = sparc_tls_got ();
3304 emit_insn (gen_tgd_hi22 (temp1, addr));
3305 emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3306 if (TARGET_ARCH32)
3307 {
3308 emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3309 insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3310 addr, const1_rtx));
3311 }
3312 else
3313 {
3314 emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3315 insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3316 addr, const1_rtx));
3317 }
3318 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3319 insn = get_insns ();
3320 end_sequence ();
3321 emit_libcall_block (insn, ret, o0, addr);
3322 break;
3323
3324 case TLS_MODEL_LOCAL_DYNAMIC:
3325 start_sequence ();
3326 temp1 = gen_reg_rtx (SImode);
3327 temp2 = gen_reg_rtx (SImode);
3328 temp3 = gen_reg_rtx (Pmode);
3329 ret = gen_reg_rtx (Pmode);
3330 o0 = gen_rtx_REG (Pmode, 8);
3331 got = sparc_tls_got ();
3332 emit_insn (gen_tldm_hi22 (temp1));
3333 emit_insn (gen_tldm_lo10 (temp2, temp1));
3334 if (TARGET_ARCH32)
3335 {
3336 emit_insn (gen_tldm_add32 (o0, got, temp2));
3337 insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
3338 const1_rtx));
3339 }
3340 else
3341 {
3342 emit_insn (gen_tldm_add64 (o0, got, temp2));
3343 insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
3344 const1_rtx));
3345 }
3346 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), o0);
3347 insn = get_insns ();
3348 end_sequence ();
3349 emit_libcall_block (insn, temp3, o0,
3350 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
3351 UNSPEC_TLSLD_BASE));
3352 temp1 = gen_reg_rtx (SImode);
3353 temp2 = gen_reg_rtx (SImode);
3354 emit_insn (gen_tldo_hix22 (temp1, addr));
3355 emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
3356 if (TARGET_ARCH32)
3357 emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
3358 else
3359 emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
3360 break;
3361
3362 case TLS_MODEL_INITIAL_EXEC:
3363 temp1 = gen_reg_rtx (SImode);
3364 temp2 = gen_reg_rtx (SImode);
3365 temp3 = gen_reg_rtx (Pmode);
3366 got = sparc_tls_got ();
3367 emit_insn (gen_tie_hi22 (temp1, addr));
3368 emit_insn (gen_tie_lo10 (temp2, temp1, addr));
3369 if (TARGET_ARCH32)
3370 emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
3371 else
3372 emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
3373 if (TARGET_SUN_TLS)
3374 {
3375 ret = gen_reg_rtx (Pmode);
3376 if (TARGET_ARCH32)
3377 emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
3378 temp3, addr));
3379 else
3380 emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
3381 temp3, addr));
3382 }
3383 else
3384 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
3385 break;
3386
3387 case TLS_MODEL_LOCAL_EXEC:
3388 temp1 = gen_reg_rtx (Pmode);
3389 temp2 = gen_reg_rtx (Pmode);
3390 if (TARGET_ARCH32)
3391 {
3392 emit_insn (gen_tle_hix22_sp32 (temp1, addr));
3393 emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
3394 }
3395 else
3396 {
3397 emit_insn (gen_tle_hix22_sp64 (temp1, addr));
3398 emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
3399 }
3400 ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
3401 break;
3402
3403 default:
3404 gcc_unreachable ();
3405 }
3406
3407 else if (GET_CODE (addr) == CONST)
3408 {
3409 rtx base, offset;
3410
3411 gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);
3412
3413 base = sparc_legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
3414 offset = XEXP (XEXP (addr, 0), 1);
3415
3416 base = force_operand (base, NULL_RTX);
3417 if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
3418 offset = force_reg (Pmode, offset);
3419 ret = gen_rtx_PLUS (Pmode, base, offset);
3420 }
3421
3422 else
3423 gcc_unreachable (); /* for now ... */
3424
3425 return ret;
3426 }
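
/* For the local-exec model, for instance, the code above amounts to the
   canonical sequence (illustrative, with %g7 as the thread pointer and
   %t1/%t2 standing for the temporaries)

	sethi	%tle_hix22(sym), %t1
	xor	%t1, %tle_lox10(sym), %t2
	add	%g7, %t2, %ret

   while the other models go through the GOT and, for the two dynamic
   models, a call to __tls_get_addr.  */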
3427
3428 /* Legitimize PIC addresses. If the address is already position-independent,
3429 we return ORIG. Newly generated position-independent addresses go into a
3430 reg. This is REG if nonzero, otherwise we allocate register(s) as
3431 necessary. */
3432
3433 static rtx
3434 sparc_legitimize_pic_address (rtx orig, rtx reg)
3435 {
3436 bool gotdata_op = false;
3437
3438 if (GET_CODE (orig) == SYMBOL_REF
3439 /* See the comment in sparc_expand_move. */
3440 || (GET_CODE (orig) == LABEL_REF && !can_use_mov_pic_label_ref (orig)))
3441 {
3442 rtx pic_ref, address;
3443 rtx insn;
3444
3445 if (reg == 0)
3446 {
3447 gcc_assert (! reload_in_progress && ! reload_completed);
3448 reg = gen_reg_rtx (Pmode);
3449 }
3450
3451 if (flag_pic == 2)
3452 {
3453 /* If not during reload, allocate another temp reg here for loading
3454 in the address, so that these instructions can be optimized
3455 properly. */
3456 rtx temp_reg = ((reload_in_progress || reload_completed)
3457 ? reg : gen_reg_rtx (Pmode));
3458
3459 /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
3460 won't get confused into thinking that these two instructions
3461 are loading in the true address of the symbol. If in the
3462 future a PIC rtx exists, that should be used instead. */
3463 if (TARGET_ARCH64)
3464 {
3465 emit_insn (gen_movdi_high_pic (temp_reg, orig));
3466 emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
3467 }
3468 else
3469 {
3470 emit_insn (gen_movsi_high_pic (temp_reg, orig));
3471 emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
3472 }
3473 address = temp_reg;
3474 gotdata_op = true;
3475 }
3476 else
3477 address = orig;
3478
3479 crtl->uses_pic_offset_table = 1;
3480 if (gotdata_op)
3481 {
3482 if (TARGET_ARCH64)
3483 insn = emit_insn (gen_movdi_pic_gotdata_op (reg,
3484 pic_offset_table_rtx,
3485 address, orig));
3486 else
3487 insn = emit_insn (gen_movsi_pic_gotdata_op (reg,
3488 pic_offset_table_rtx,
3489 address, orig));
3490 }
3491 else
3492 {
3493 pic_ref
3494 = gen_const_mem (Pmode,
3495 gen_rtx_PLUS (Pmode,
3496 pic_offset_table_rtx, address));
3497 insn = emit_move_insn (reg, pic_ref);
3498 }
3499
3500 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3501 by loop. */
3502 set_unique_reg_note (insn, REG_EQUAL, orig);
3503 return reg;
3504 }
3505 else if (GET_CODE (orig) == CONST)
3506 {
3507 rtx base, offset;
3508
3509 if (GET_CODE (XEXP (orig, 0)) == PLUS
3510 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
3511 return orig;
3512
3513 if (reg == 0)
3514 {
3515 gcc_assert (! reload_in_progress && ! reload_completed);
3516 reg = gen_reg_rtx (Pmode);
3517 }
3518
3519 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3520 base = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
3521 offset = sparc_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
3522 base == reg ? NULL_RTX : reg);
3523
3524 if (GET_CODE (offset) == CONST_INT)
3525 {
3526 if (SMALL_INT (offset))
3527 return plus_constant (base, INTVAL (offset));
3528 else if (! reload_in_progress && ! reload_completed)
3529 offset = force_reg (Pmode, offset);
3530 else
3531 /* If we reach here, then something is seriously wrong. */
3532 gcc_unreachable ();
3533 }
3534 return gen_rtx_PLUS (Pmode, base, offset);
3535 }
3536 else if (GET_CODE (orig) == LABEL_REF)
3537 /* ??? We ought to be checking that the register is live instead, in case
3538 it is eliminated. */
3539 crtl->uses_pic_offset_table = 1;
3540
3541 return orig;
3542 }
3543
3544 /* Try machine-dependent ways of modifying an illegitimate address X
3545 to be legitimate. If we find one, return the new, valid address.
3546
3547 OLDX is the address as it was before break_out_memory_refs was called.
3548 In some cases it is useful to look at this to decide what needs to be done.
3549
3550 MODE is the mode of the operand pointed to by X.
3551
3552 On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
3553
3554 static rtx
3555 sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
3556 enum machine_mode mode)
3557 {
3558 rtx orig_x = x;
3559
3560 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
3561 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3562 force_operand (XEXP (x, 0), NULL_RTX));
3563 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
3564 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3565 force_operand (XEXP (x, 1), NULL_RTX));
3566 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
3567 x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
3568 XEXP (x, 1));
3569 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
3570 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3571 force_operand (XEXP (x, 1), NULL_RTX));
3572
3573 if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
3574 return x;
3575
3576 if (sparc_tls_referenced_p (x))
3577 x = sparc_legitimize_tls_address (x);
3578 else if (flag_pic)
3579 x = sparc_legitimize_pic_address (x, NULL_RTX);
3580 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
3581 x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
3582 copy_to_mode_reg (Pmode, XEXP (x, 1)));
3583 else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
3584 x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
3585 copy_to_mode_reg (Pmode, XEXP (x, 0)));
3586 else if (GET_CODE (x) == SYMBOL_REF
3587 || GET_CODE (x) == CONST
3588 || GET_CODE (x) == LABEL_REF)
3589 x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
3590
3591 return x;
3592 }
3593
3594 /* Delegitimize an address that was legitimized by the above function. */
3595
3596 static rtx
3597 sparc_delegitimize_address (rtx x)
3598 {
3599 x = delegitimize_mem_from_attrs (x);
3600
3601 if (GET_CODE (x) == LO_SUM
3602 && GET_CODE (XEXP (x, 1)) == UNSPEC
3603 && XINT (XEXP (x, 1), 1) == UNSPEC_TLSLE)
3604 {
3605 x = XVECEXP (XEXP (x, 1), 0, 0);
3606 gcc_assert (GET_CODE (x) == SYMBOL_REF);
3607 }
3608
3609 /* This is generated by mov{si,di}_pic_label_ref in PIC mode. */
3610 if (GET_CODE (x) == MINUS
3611 && REG_P (XEXP (x, 0))
3612 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM
3613 && GET_CODE (XEXP (x, 1)) == LO_SUM
3614 && GET_CODE (XEXP (XEXP (x, 1), 1)) == UNSPEC
3615 && XINT (XEXP (XEXP (x, 1), 1), 1) == UNSPEC_MOVE_PIC_LABEL)
3616 {
3617 x = XVECEXP (XEXP (XEXP (x, 1), 1), 0, 0);
3618 gcc_assert (GET_CODE (x) == LABEL_REF);
3619 }
3620
3621 return x;
3622 }
3623
3624 /* SPARC implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
3625 replace the input X, or the original X if no replacement is called for.
3626 The output parameter *WIN is 1 if the calling macro should goto WIN,
3627 0 if it should not.
3628
3629 For SPARC, we wish to handle addresses by splitting them into
3630 HIGH+LO_SUM pairs, retaining the LO_SUM in the memory reference.
3631 This cuts the number of extra insns by one.
3632
3633 Do nothing when generating PIC code and the address is a symbolic
3634 operand or requires a scratch register. */
3635
3636 rtx
3637 sparc_legitimize_reload_address (rtx x, enum machine_mode mode,
3638 int opnum, int type,
3639 int ind_levels ATTRIBUTE_UNUSED, int *win)
3640 {
3641 /* Decompose SImode constants into HIGH+LO_SUM. */
3642 if (CONSTANT_P (x)
3643 && (mode != TFmode || TARGET_ARCH64)
3644 && GET_MODE (x) == SImode
3645 && GET_CODE (x) != LO_SUM
3646 && GET_CODE (x) != HIGH
3647 && sparc_cmodel <= CM_MEDLOW
3648 && !(flag_pic
3649 && (symbolic_operand (x, Pmode) || pic_address_needs_scratch (x))))
3650 {
3651 x = gen_rtx_LO_SUM (GET_MODE (x), gen_rtx_HIGH (GET_MODE (x), x), x);
3652 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3653 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3654 opnum, (enum reload_type)type);
3655 *win = 1;
3656 return x;
3657 }
3658
3659 /* We have to recognize what we have already generated above. */
3660 if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == HIGH)
3661 {
3662 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
3663 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
3664 opnum, (enum reload_type)type);
3665 *win = 1;
3666 return x;
3667 }
3668
3669 *win = 0;
3670 return x;
3671 }
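
/* Concretely (an illustrative example): a medium/low code model access
   to a symbol "sym" is rewritten as (lo_sum (high sym) sym), reloading
   only the HIGH part into a base register, so the memory reference
   becomes

	sethi	%hi(sym), %reg
	ld	[%reg + %lo(sym)], %dest

   rather than materializing the full address first.  */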
3672
3673 /* Return true if ADDR (a legitimate address expression)
3674 has an effect that depends on the machine mode it is used for.
3675
3676 In PIC mode,
3677
3678 (mem:HI [%l7+a])
3679
3680 is not equivalent to
3681
3682 (mem:QI [%l7+a]) (mem:QI [%l7+a+1])
3683
3684 because [%l7+a+1] is interpreted as the address of (a+1). */
3685
3686
3687 static bool
3688 sparc_mode_dependent_address_p (const_rtx addr)
3689 {
3690 if (flag_pic && GET_CODE (addr) == PLUS)
3691 {
3692 rtx op0 = XEXP (addr, 0);
3693 rtx op1 = XEXP (addr, 1);
3694 if (op0 == pic_offset_table_rtx
3695 && SYMBOLIC_CONST (op1))
3696 return true;
3697 }
3698
3699 return false;
3700 }
3701
3702 #ifdef HAVE_GAS_HIDDEN
3703 # define USE_HIDDEN_LINKONCE 1
3704 #else
3705 # define USE_HIDDEN_LINKONCE 0
3706 #endif
3707
3708 static void
3709 get_pc_thunk_name (char name[32], unsigned int regno)
3710 {
3711 const char *reg_name = reg_names[regno];
3712
3713 /* Skip the leading '%' as that cannot be used in a
3714 symbol name. */
3715 reg_name += 1;
3716
3717 if (USE_HIDDEN_LINKONCE)
3718 sprintf (name, "__sparc_get_pc_thunk.%s", reg_name);
3719 else
3720 ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
3721 }
3722
3723 /* Wrapper around the load_pcrel_sym{si,di} patterns. */
3724
3725 static rtx
3726 gen_load_pcrel_sym (rtx op0, rtx op1, rtx op2, rtx op3)
3727 {
3728 int orig_flag_pic = flag_pic;
3729 rtx insn;
3730
3731 /* The load_pcrel_sym{si,di} patterns require absolute addressing. */
3732 flag_pic = 0;
3733 if (TARGET_ARCH64)
3734 insn = gen_load_pcrel_symdi (op0, op1, op2, op3);
3735 else
3736 insn = gen_load_pcrel_symsi (op0, op1, op2, op3);
3737 flag_pic = orig_flag_pic;
3738
3739 return insn;
3740 }
3741
3742 /* Emit code to load the GOT register. */
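
/* In the usual PIC case, the net effect is the well-known sequence
   (illustrative, with the default %l7 GOT register):

	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4), %l7
	call	__sparc_get_pc_thunk.l7
	 add	%l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7  */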
3743
3744 static void
3745 load_got_register (void)
3746 {
3747 /* In PIC mode, this will retrieve pic_offset_table_rtx. */
3748 if (!global_offset_table_rtx)
3749 global_offset_table_rtx = gen_rtx_REG (Pmode, GLOBAL_OFFSET_TABLE_REGNUM);
3750
3751 if (TARGET_VXWORKS_RTP)
3752 emit_insn (gen_vxworks_load_got ());
3753 else
3754 {
3755 /* The GOT symbol is subject to a PC-relative relocation so we need a
3756 helper function to add the PC value and thus get the final value. */
3757 if (!got_helper_rtx)
3758 {
3759 char name[32];
3760 get_pc_thunk_name (name, GLOBAL_OFFSET_TABLE_REGNUM);
3761 got_helper_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3762 }
3763
3764 emit_insn (gen_load_pcrel_sym (global_offset_table_rtx, sparc_got (),
3765 got_helper_rtx,
3766 GEN_INT (GLOBAL_OFFSET_TABLE_REGNUM)));
3767 }
3768
3769 /* Need to emit this whether or not we obey regdecls,
3770 since setjmp/longjmp can cause life info to screw up.
3771 ??? In the case where we don't obey regdecls, this is not sufficient
3772 since we may not fall out the bottom. */
3773 emit_use (global_offset_table_rtx);
3774 }
3775
3776 /* Emit a call instruction with the pattern given by PAT. ADDR is the
3777 address of the call target. */
3778
3779 void
3780 sparc_emit_call_insn (rtx pat, rtx addr)
3781 {
3782 rtx insn;
3783
3784 insn = emit_call_insn (pat);
3785
3786 /* The PIC register is live on entry to VxWorks PIC PLT entries. */
3787 if (TARGET_VXWORKS_RTP
3788 && flag_pic
3789 && GET_CODE (addr) == SYMBOL_REF
3790 && (SYMBOL_REF_DECL (addr)
3791 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
3792 : !SYMBOL_REF_LOCAL_P (addr)))
3793 {
3794 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3795 crtl->uses_pic_offset_table = 1;
3796 }
3797 }
3798 \f
3799 /* Return 1 if RTX is a MEM which is known to be aligned to at
3800 least a DESIRED byte boundary. */
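
/* For example (illustrative), with DESIRED == 8, a 32-bit access such as
   (mem:DF (plus (reg %fp) (const_int -16))) passes the test: for the frame
   pointer the only requirement is that the biased offset be a multiple of
   DESIRED, and the stack bias is 0 in 32-bit mode.  */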
3801
3802 int
3803 mem_min_alignment (rtx mem, int desired)
3804 {
3805 rtx addr, base, offset;
3806
3807 /* If it's not a MEM we can't accept it. */
3808 if (GET_CODE (mem) != MEM)
3809 return 0;
3810
3811 /* Obviously... */
3812 if (!TARGET_UNALIGNED_DOUBLES
3813 && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
3814 return 1;
3815
3816 /* ??? The rest of the function predates MEM_ALIGN so
3817 there is probably a bit of redundancy. */
3818 addr = XEXP (mem, 0);
3819 base = offset = NULL_RTX;
3820 if (GET_CODE (addr) == PLUS)
3821 {
3822 if (GET_CODE (XEXP (addr, 0)) == REG)
3823 {
3824 base = XEXP (addr, 0);
3825
3826 /* What we are saying here is that if the base
3827 REG is aligned properly, the compiler will make
3828 sure any REG based index upon it will be so
3829 as well. */
3830 if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
3831 offset = XEXP (addr, 1);
3832 else
3833 offset = const0_rtx;
3834 }
3835 }
3836 else if (GET_CODE (addr) == REG)
3837 {
3838 base = addr;
3839 offset = const0_rtx;
3840 }
3841
3842 if (base != NULL_RTX)
3843 {
3844 int regno = REGNO (base);
3845
3846 if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
3847 {
3848 /* Check if the compiler has recorded some information
3849 about the alignment of the base REG. If reload has
3850 completed, we already matched with proper alignments.
3851 If not running global_alloc, reload might give us
an unaligned pointer to the local stack, though.  */
3853 if (((cfun != 0
3854 && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
3855 || (optimize && reload_completed))
3856 && (INTVAL (offset) & (desired - 1)) == 0)
3857 return 1;
3858 }
3859 else
3860 {
3861 if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
3862 return 1;
3863 }
3864 }
3865 else if (! TARGET_UNALIGNED_DOUBLES
3866 || CONSTANT_P (addr)
3867 || GET_CODE (addr) == LO_SUM)
3868 {
3869 /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
3870 is true, in which case we can only assume that an access is aligned if
3871 it is to a constant address, or the address involves a LO_SUM. */
3872 return 1;
3873 }
3874
3875 /* An obviously unaligned address. */
3876 return 0;
3877 }
3878
3879 \f
3880 /* Vectors to keep interesting information about registers where it can easily
3881 be got. We used to use the actual mode value as the bit number, but there
3882 are more than 32 modes now. Instead we use two tables: one indexed by
3883 hard register number, and one indexed by mode. */
3884
3885 /* The purpose of sparc_mode_class is to shrink the range of modes so that
3886 they all fit (as bit numbers) in a 32-bit word (again). Each real mode is
3887 mapped into one sparc_mode_class mode. */
3888
3889 enum sparc_mode_class {
3890 S_MODE, D_MODE, T_MODE, O_MODE,
3891 SF_MODE, DF_MODE, TF_MODE, OF_MODE,
3892 CC_MODE, CCFP_MODE
3893 };
3894
3895 /* Modes for single-word and smaller quantities. */
3896 #define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
3897
3898 /* Modes for double-word and smaller quantities. */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
3900
3901 /* Modes for quad-word and smaller quantities. */
3902 #define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
3903
3904 /* Modes for 8-word and smaller quantities. */
3905 #define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))
3906
3907 /* Modes for single-float quantities. We must allow any single word or
3908 smaller quantity. This is because the fix/float conversion instructions
3909 take integer inputs/outputs from the float registers. */
3910 #define SF_MODES (S_MODES)
3911
3912 /* Modes for double-float and smaller quantities. */
3913 #define DF_MODES (D_MODES)
3914
3915 /* Modes for quad-float and smaller quantities. */
3916 #define TF_MODES (DF_MODES | (1 << (int) TF_MODE))
3917
3918 /* Modes for quad-float pairs and smaller quantities. */
3919 #define OF_MODES (TF_MODES | (1 << (int) OF_MODE))
3920
3921 /* Modes for double-float only quantities. */
3922 #define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))
3923
3924 /* Modes for quad-float and double-float only quantities. */
3925 #define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))
3926
3927 /* Modes for quad-float pairs and double-float only quantities. */
3928 #define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))
3929
3930 /* Modes for condition codes. */
3931 #define CC_MODES (1 << (int) CC_MODE)
3932 #define CCFP_MODES (1 << (int) CCFP_MODE)
3933
3934 /* Value is 1 if register/mode pair is acceptable on sparc.
3935 The funny mixture of D and T modes is because integer operations
3936 do not specially operate on tetra quantities, so non-quad-aligned
3937 registers can hold quadword quantities (except %o4 and %i4 because
3938 they cross fixed registers). */
3939
3940 /* This points to either the 32 bit or the 64 bit version. */
3941 const int *hard_regno_mode_classes;
3942
3943 static const int hard_32bit_mode_classes[] = {
3944 S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3945 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3946 T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
3947 T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
3948
3949 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3950 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3951 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3952 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3953
3954 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3955 and none can hold SFmode/SImode values. */
3956 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3957 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3958 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3959 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3960
3961 /* %fcc[0123] */
3962 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3963
3964 /* %icc */
3965 CC_MODES
3966 };
3967
3968 static const int hard_64bit_mode_classes[] = {
3969 D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3970 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3971 T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3972 O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
3973
3974 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3975 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3976 OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
3977 OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
3978
3979 /* FP regs f32 to f63. Only the even numbered registers actually exist,
3980 and none can hold SFmode/SImode values. */
3981 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3982 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3983 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3984 OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
3985
3986 /* %fcc[0123] */
3987 CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
3988
3989 /* %icc */
3990 CC_MODES
3991 };
3992
3993 int sparc_mode_class [NUM_MACHINE_MODES];
3994
3995 enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
3996
3997 static void
3998 sparc_init_modes (void)
3999 {
4000 int i;
4001
4002 for (i = 0; i < NUM_MACHINE_MODES; i++)
4003 {
4004 switch (GET_MODE_CLASS (i))
4005 {
4006 case MODE_INT:
4007 case MODE_PARTIAL_INT:
4008 case MODE_COMPLEX_INT:
4009 if (GET_MODE_SIZE (i) <= 4)
4010 sparc_mode_class[i] = 1 << (int) S_MODE;
4011 else if (GET_MODE_SIZE (i) == 8)
4012 sparc_mode_class[i] = 1 << (int) D_MODE;
4013 else if (GET_MODE_SIZE (i) == 16)
4014 sparc_mode_class[i] = 1 << (int) T_MODE;
4015 else if (GET_MODE_SIZE (i) == 32)
4016 sparc_mode_class[i] = 1 << (int) O_MODE;
4017 else
4018 sparc_mode_class[i] = 0;
4019 break;
4020 case MODE_VECTOR_INT:
4021 if (GET_MODE_SIZE (i) <= 4)
4022 sparc_mode_class[i] = 1 << (int)SF_MODE;
4023 else if (GET_MODE_SIZE (i) == 8)
4024 sparc_mode_class[i] = 1 << (int)DF_MODE;
4025 break;
4026 case MODE_FLOAT:
4027 case MODE_COMPLEX_FLOAT:
4028 if (GET_MODE_SIZE (i) <= 4)
4029 sparc_mode_class[i] = 1 << (int) SF_MODE;
4030 else if (GET_MODE_SIZE (i) == 8)
4031 sparc_mode_class[i] = 1 << (int) DF_MODE;
4032 else if (GET_MODE_SIZE (i) == 16)
4033 sparc_mode_class[i] = 1 << (int) TF_MODE;
4034 else if (GET_MODE_SIZE (i) == 32)
4035 sparc_mode_class[i] = 1 << (int) OF_MODE;
4036 else
4037 sparc_mode_class[i] = 0;
4038 break;
4039 case MODE_CC:
4040 if (i == (int) CCFPmode || i == (int) CCFPEmode)
4041 sparc_mode_class[i] = 1 << (int) CCFP_MODE;
4042 else
4043 sparc_mode_class[i] = 1 << (int) CC_MODE;
4044 break;
4045 default:
4046 sparc_mode_class[i] = 0;
4047 break;
4048 }
4049 }
4050
4051 if (TARGET_ARCH64)
4052 hard_regno_mode_classes = hard_64bit_mode_classes;
4053 else
4054 hard_regno_mode_classes = hard_32bit_mode_classes;
4055
4056 /* Initialize the array used by REGNO_REG_CLASS. */
4057 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4058 {
4059 if (i < 16 && TARGET_V8PLUS)
4060 sparc_regno_reg_class[i] = I64_REGS;
4061 else if (i < 32 || i == FRAME_POINTER_REGNUM)
4062 sparc_regno_reg_class[i] = GENERAL_REGS;
4063 else if (i < 64)
4064 sparc_regno_reg_class[i] = FP_REGS;
4065 else if (i < 96)
4066 sparc_regno_reg_class[i] = EXTRA_FP_REGS;
4067 else if (i < 100)
4068 sparc_regno_reg_class[i] = FPCC_REGS;
4069 else
4070 sparc_regno_reg_class[i] = NO_REGS;
4071 }
4072 }
4073 \f
4074 /* Compute the frame size required by the function. This function is called
4075 during the reload pass and also by sparc_expand_prologue. */
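
/* Illustrative arithmetic for the non-trivial path below: with SIZE == 40,
   8 saved register words (n_regs == 8) and 16 bytes of outgoing arguments,
   apparent_fsize is (40 - STARTING_FRAME_OFFSET) rounded up to a multiple
   of 8, plus 8 * 4 bytes of save area; actual_fsize additionally includes
   the 16 bytes of outgoing arguments plus (since SIZE > 0) the register
   window area given by FIRST_PARM_OFFSET.  */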
4076
4077 HOST_WIDE_INT
4078 sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
4079 {
4080 int outgoing_args_size = (crtl->outgoing_args_size
4081 + REG_PARM_STACK_SPACE (current_function_decl));
4082 int n_regs = 0; /* N_REGS is the number of 4-byte regs saved thus far. */
4083 int i;
4084
4085 if (TARGET_ARCH64)
4086 {
4087 for (i = 0; i < 8; i++)
4088 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4089 n_regs += 2;
4090 }
4091 else
4092 {
4093 for (i = 0; i < 8; i += 2)
4094 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4095 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4096 n_regs += 2;
4097 }
4098
4099 for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
4100 if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
4101 || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
4102 n_regs += 2;
4103
4104 /* Set up values for use in prologue and epilogue. */
4105 num_gfregs = n_regs;
4106
4107 if (leaf_function_p
4108 && n_regs == 0
4109 && size == 0
4110 && crtl->outgoing_args_size == 0)
4111 actual_fsize = apparent_fsize = 0;
4112 else
4113 {
4114 /* We subtract STARTING_FRAME_OFFSET, remember it's negative. */
4115 apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
4116 apparent_fsize += n_regs * 4;
4117 actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
4118 }
4119
4120 /* Make sure nothing can clobber our register windows.
4121 If a SAVE must be done, or there is a stack-local variable,
4122 the register window area must be allocated. */
4123 if (! leaf_function_p || size > 0)
4124 actual_fsize += FIRST_PARM_OFFSET (current_function_decl);
4125
4126 return SPARC_STACK_ALIGN (actual_fsize);
4127 }
4128
4129 /* Output any necessary .register pseudo-ops. */
4130
4131 void
4132 sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
4133 {
4134 #ifdef HAVE_AS_REGISTER_PSEUDO_OP
4135 int i;
4136
4137 if (TARGET_ARCH32)
4138 return;
4139
4140 /* Check if %g[2367] were used without
4141 .register being printed for them already. */
4142 for (i = 2; i < 8; i++)
4143 {
4144 if (df_regs_ever_live_p (i)
4145 && ! sparc_hard_reg_printed [i])
4146 {
4147 sparc_hard_reg_printed [i] = 1;
4148 /* %g7 is used as TLS base register, use #ignore
4149 for it instead of #scratch. */
4150 fprintf (file, "\t.register\t%%g%d, #%s\n", i,
4151 i == 7 ? "ignore" : "scratch");
4152 }
4153 if (i == 3) i = 5;
4154 }
4155 #endif
4156 }
4157
4158 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
4159
4160 #if PROBE_INTERVAL > 4096
4161 #error Cannot use indexed addressing mode for stack probing
4162 #endif
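
/* With the default value of 12 for STACK_CHECK_PROBE_INTERVAL_EXP, this
   amounts to one probe every 4096 bytes, i.e. one probe per typical page.  */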
4163
4164 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
4165 inclusive. These are offsets from the current stack pointer.
4166
4167 Note that we don't use the REG+REG addressing mode for the probes because
4168 of the stack bias in 64-bit mode. And it doesn't really buy us anything
so the advantages of having a single code path win here.  */
4170
4171 static void
4172 sparc_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
4173 {
4174 rtx g1 = gen_rtx_REG (Pmode, 1);
4175
4176 /* See if we have a constant small number of probes to generate. If so,
4177 that's the easy case. */
4178 if (size <= PROBE_INTERVAL)
4179 {
4180 emit_move_insn (g1, GEN_INT (first));
4181 emit_insn (gen_rtx_SET (VOIDmode, g1,
4182 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4183 emit_stack_probe (plus_constant (g1, -size));
4184 }
4185
4186 /* The run-time loop is made up of 10 insns in the generic case while the
4187 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
4188 else if (size <= 5 * PROBE_INTERVAL)
4189 {
4190 HOST_WIDE_INT i;
4191
4192 emit_move_insn (g1, GEN_INT (first + PROBE_INTERVAL));
4193 emit_insn (gen_rtx_SET (VOIDmode, g1,
4194 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4195 emit_stack_probe (g1);
4196
4197 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
4198 it exceeds SIZE. If only two probes are needed, this will not
4199 generate any code. Then probe at FIRST + SIZE. */
4200 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
4201 {
4202 emit_insn (gen_rtx_SET (VOIDmode, g1,
4203 plus_constant (g1, -PROBE_INTERVAL)));
4204 emit_stack_probe (g1);
4205 }
4206
4207 emit_stack_probe (plus_constant (g1, (i - PROBE_INTERVAL) - size));
4208 }
4209
4210 /* Otherwise, do the same as above, but in a loop. Note that we must be
4211 extra careful with variables wrapping around because we might be at
4212 the very top (or the very bottom) of the address space and we have
4213 to be able to handle this case properly; in particular, we use an
4214 equality test for the loop condition. */
4215 else
4216 {
4217 HOST_WIDE_INT rounded_size;
4218 rtx g4 = gen_rtx_REG (Pmode, 4);
4219
4220 emit_move_insn (g1, GEN_INT (first));
4221
4222
4223 /* Step 1: round SIZE to the previous multiple of the interval. */
4224
4225 rounded_size = size & -PROBE_INTERVAL;
4226 emit_move_insn (g4, GEN_INT (rounded_size));
4227
4228
4229 /* Step 2: compute initial and final value of the loop counter. */
4230
4231 /* TEST_ADDR = SP + FIRST. */
4232 emit_insn (gen_rtx_SET (VOIDmode, g1,
4233 gen_rtx_MINUS (Pmode, stack_pointer_rtx, g1)));
4234
4235 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
4236 emit_insn (gen_rtx_SET (VOIDmode, g4, gen_rtx_MINUS (Pmode, g1, g4)));
4237
4238
4239 /* Step 3: the loop
4240
4241 while (TEST_ADDR != LAST_ADDR)
4242 {
4243 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
4244 probe at TEST_ADDR
4245 }
4246
4247 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
4248 until it is equal to ROUNDED_SIZE. */
4249
      if (TARGET_ARCH64)
4251 emit_insn (gen_probe_stack_rangedi (g1, g1, g4));
4252 else
4253 emit_insn (gen_probe_stack_rangesi (g1, g1, g4));
4254
4255
4256 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
4257 that SIZE is equal to ROUNDED_SIZE. */
4258
4259 if (size != rounded_size)
4260 emit_stack_probe (plus_constant (g4, rounded_size - size));
4261 }
4262
4263 /* Make sure nothing is scheduled before we are done. */
4264 emit_insn (gen_blockage ());
4265 }
4266
4267 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
4268 absolute addresses. */
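
/* On 32-bit (no stack bias), with %g1 and %g4 for REG1 and REG2 and the
   default 4096-byte interval, the emitted loop reads (illustrative):

	.LPSRL0:
	cmp	%g1, %g4
	be	.LPSRE0
	 add	%g1, -4096, %g1
	ba	.LPSRL0
	 st	%g0, [%g1+0]
	.LPSRE0:  */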
4269
4270 const char *
4271 output_probe_stack_range (rtx reg1, rtx reg2)
4272 {
4273 static int labelno = 0;
4274 char loop_lab[32], end_lab[32];
4275 rtx xops[2];
4276
4277 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
4278 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
4279
4280 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
4281
4282 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
4283 xops[0] = reg1;
4284 xops[1] = reg2;
4285 output_asm_insn ("cmp\t%0, %1", xops);
4286 if (TARGET_ARCH64)
4287 fputs ("\tbe,pn\t%xcc,", asm_out_file);
4288 else
4289 fputs ("\tbe\t", asm_out_file);
4290 assemble_name_raw (asm_out_file, end_lab);
4291 fputc ('\n', asm_out_file);
4292
4293 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
4294 xops[1] = GEN_INT (-PROBE_INTERVAL);
4295 output_asm_insn (" add\t%0, %1, %0", xops);
4296
4297 /* Probe at TEST_ADDR and branch. */
4298 if (TARGET_ARCH64)
4299 fputs ("\tba,pt\t%xcc,", asm_out_file);
4300 else
4301 fputs ("\tba\t", asm_out_file);
4302 assemble_name_raw (asm_out_file, loop_lab);
4303 fputc ('\n', asm_out_file);
4304 xops[1] = GEN_INT (SPARC_STACK_BIAS);
4305 output_asm_insn (" st\t%%g0, [%0+%1]", xops);
4306
4307 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
4308
4309 return "";
4310 }
4311
4312 /* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
4313 as needed. LOW should be double-word aligned for 32-bit registers.
4314 Return the new OFFSET. */
4315
4316 #define SORR_SAVE 0
4317 #define SORR_RESTORE 1
4318
4319 static int
4320 save_or_restore_regs (int low, int high, rtx base, int offset, int action)
4321 {
4322 rtx mem, insn;
4323 int i;
4324
4325 if (TARGET_ARCH64 && high <= 32)
4326 {
4327 for (i = low; i < high; i++)
4328 {
4329 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4330 {
4331 mem = gen_frame_mem (DImode, plus_constant (base, offset));
4332 if (action == SORR_SAVE)
4333 {
4334 insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
4335 RTX_FRAME_RELATED_P (insn) = 1;
4336 }
4337 else /* action == SORR_RESTORE */
4338 emit_move_insn (gen_rtx_REG (DImode, i), mem);
4339 offset += 8;
4340 }
4341 }
4342 }
4343 else
4344 {
4345 for (i = low; i < high; i += 2)
4346 {
4347 bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
4348 bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
4349 enum machine_mode mode;
4350 int regno;
4351
4352 if (reg0 && reg1)
4353 {
4354 mode = i < 32 ? DImode : DFmode;
4355 regno = i;
4356 }
4357 else if (reg0)
4358 {
4359 mode = i < 32 ? SImode : SFmode;
4360 regno = i;
4361 }
4362 else if (reg1)
4363 {
4364 mode = i < 32 ? SImode : SFmode;
4365 regno = i + 1;
4366 offset += 4;
4367 }
4368 else
4369 continue;
4370
4371 mem = gen_frame_mem (mode, plus_constant (base, offset));
4372 if (action == SORR_SAVE)
4373 {
4374 insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
4375 RTX_FRAME_RELATED_P (insn) = 1;
4376 }
4377 else /* action == SORR_RESTORE */
4378 emit_move_insn (gen_rtx_REG (mode, regno), mem);
4379
4380 /* Always preserve double-word alignment. */
4381 offset = (offset + 7) & -8;
4382 }
4383 }
4384
4385 return offset;
4386 }
4387
/* Emit code to save or restore call-saved registers, as selected by ACTION.  */
4389
4390 static void
4391 emit_save_or_restore_regs (int action)
4392 {
4393 HOST_WIDE_INT offset;
4394 rtx base;
4395
4396 offset = frame_base_offset - apparent_fsize;
4397
4398 if (offset < -4096 || offset + num_gfregs * 4 > 4095)
4399 {
4400 /* ??? This might be optimized a little as %g1 might already have a
4401 value close enough that a single add insn will do. */
4402 /* ??? Although, all of this is probably only a temporary fix
4403 because if %g1 can hold a function result, then
4404 sparc_expand_epilogue will lose (the result will be
4405 clobbered). */
4406 base = gen_rtx_REG (Pmode, 1);
4407 emit_move_insn (base, GEN_INT (offset));
4408 emit_insn (gen_rtx_SET (VOIDmode,
4409 base,
4410 gen_rtx_PLUS (Pmode, frame_base_reg, base)));
4411 offset = 0;
4412 }
4413 else
4414 base = frame_base_reg;
4415
4416 offset = save_or_restore_regs (0, 8, base, offset, action);
4417 save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
4418 }
4419
4420 /* Generate a save_register_window insn. */
4421
4422 static rtx
4423 gen_save_register_window (rtx increment)
4424 {
4425 if (TARGET_ARCH64)
4426 return gen_save_register_windowdi (increment);
4427 else
4428 return gen_save_register_windowsi (increment);
4429 }
4430
4431 /* Generate an increment for the stack pointer. */
4432
4433 static rtx
4434 gen_stack_pointer_inc (rtx increment)
4435 {
4436 return gen_rtx_SET (VOIDmode,
4437 stack_pointer_rtx,
4438 gen_rtx_PLUS (Pmode,
4439 stack_pointer_rtx,
4440 increment));
4441 }
4442
4443 /* Generate a decrement for the stack pointer. */
4444
4445 static rtx
4446 gen_stack_pointer_dec (rtx decrement)
4447 {
4448 return gen_rtx_SET (VOIDmode,
4449 stack_pointer_rtx,
4450 gen_rtx_MINUS (Pmode,
4451 stack_pointer_rtx,
4452 decrement));
4453 }
4454
4455 /* Expand the function prologue. The prologue is responsible for reserving
4456 storage for the frame, saving the call-saved registers and loading the
4457 GOT register if needed. */
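
/* Three strategies are used below, depending on the frame size: a single
   adjustment when -ACTUAL_FSIZE fits in the 13-bit signed immediate field,
   a pair of adjustments up to 8192 bytes, and an adjustment through the
   scratch register %g1 beyond that.  E.g. (illustrative) a 6000-byte leaf
   frame becomes  add %sp, -4096, %sp  followed by  add %sp, -1904, %sp.  */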
4458
4459 void
4460 sparc_expand_prologue (void)
4461 {
4462 rtx insn;
4463 int i;
4464
4465 /* Compute a snapshot of current_function_uses_only_leaf_regs. Relying
4466 on the final value of the flag means deferring the prologue/epilogue
4467 expansion until just before the second scheduling pass, which is too
4468 late to emit multiple epilogues or return insns.
4469
4470 Of course we are making the assumption that the value of the flag
4471 will not change between now and its final value. Of the three parts
4472 of the formula, only the last one can reasonably vary. Let's take a
4473 closer look, after assuming that the first two ones are set to true
4474 (otherwise the last value is effectively silenced).
4475
4476 If only_leaf_regs_used returns false, the global predicate will also
4477 be false so the actual frame size calculated below will be positive.
4478 As a consequence, the save_register_window insn will be emitted in
4479 the instruction stream; now this insn explicitly references %fp
4480 which is not a leaf register so only_leaf_regs_used will always
4481 return false subsequently.
4482
4483 If only_leaf_regs_used returns true, we hope that the subsequent
4484 optimization passes won't cause non-leaf registers to pop up. For
4485 example, the regrename pass has special provisions to not rename to
4486 non-leaf registers in a leaf function. */
4487 sparc_leaf_function_p
4488 = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();
4489
4490 /* Need to use actual_fsize, since we are also allocating
4491 space for our callee (and our own register save area). */
4492 actual_fsize
4493 = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);
4494
4495 /* Advertise that the data calculated just above are now valid. */
4496 sparc_prologue_data_valid_p = true;
4497
4498 if (flag_stack_usage)
4499 current_function_static_stack_size = actual_fsize;
4500
4501 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK && actual_fsize)
4502 sparc_emit_probe_stack_range (STACK_CHECK_PROTECT, actual_fsize);
4503
4504 if (sparc_leaf_function_p)
4505 {
4506 frame_base_reg = stack_pointer_rtx;
4507 frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
4508 }
4509 else
4510 {
4511 frame_base_reg = hard_frame_pointer_rtx;
4512 frame_base_offset = SPARC_STACK_BIAS;
4513 }
4514
4515 if (actual_fsize == 0)
4516 /* do nothing. */ ;
4517 else if (sparc_leaf_function_p)
4518 {
4519 if (actual_fsize <= 4096)
4520 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4521 else if (actual_fsize <= 8192)
4522 {
4523 insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
4524 /* %sp is still the CFA register. */
4525 RTX_FRAME_RELATED_P (insn) = 1;
4526 insn
4527 = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4528 }
4529 else
4530 {
4531 rtx reg = gen_rtx_REG (Pmode, 1);
4532 emit_move_insn (reg, GEN_INT (-actual_fsize));
4533 insn = emit_insn (gen_stack_pointer_inc (reg));
4534 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
4535 gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
4536 }
4537
4538 RTX_FRAME_RELATED_P (insn) = 1;
4539 }
4540 else
4541 {
4542 if (actual_fsize <= 4096)
4543 insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
4544 else if (actual_fsize <= 8192)
4545 {
4546 insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
4547 /* %sp is not the CFA register anymore. */
4548 emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
4549 }
4550 else
4551 {
4552 rtx reg = gen_rtx_REG (Pmode, 1);
4553 emit_move_insn (reg, GEN_INT (-actual_fsize));
4554 insn = emit_insn (gen_save_register_window (reg));
4555 }
4556
4557 RTX_FRAME_RELATED_P (insn) = 1;
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
4559 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
4560 }
4561
4562 if (num_gfregs)
4563 emit_save_or_restore_regs (SORR_SAVE);
4564
4565 /* Load the GOT register if needed. */
4566 if (crtl->uses_pic_offset_table)
4567 load_got_register ();
4568 }
4569
4570 /* This function generates the assembly code for function entry, which boils
4571 down to emitting the necessary .register directives. */
4572
4573 static void
4574 sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4575 {
4576 /* Check that the assumption we made in sparc_expand_prologue is valid. */
4577 gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);
4578
4579 sparc_output_scratch_registers (file);
4580 }
4581
4582 /* Expand the function epilogue, either normal or part of a sibcall.
4583 We emit all the instructions except the return or the call. */
4584
4585 void
4586 sparc_expand_epilogue (void)
4587 {
4588 if (num_gfregs)
4589 emit_save_or_restore_regs (SORR_RESTORE);
4590
4591 if (actual_fsize == 0)
4592 /* do nothing. */ ;
4593 else if (sparc_leaf_function_p)
4594 {
4595 if (actual_fsize <= 4096)
4596 emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
4597 else if (actual_fsize <= 8192)
4598 {
4599 emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
4600 emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
4601 }
4602 else
4603 {
4604 rtx reg = gen_rtx_REG (Pmode, 1);
4605 emit_move_insn (reg, GEN_INT (-actual_fsize));
4606 emit_insn (gen_stack_pointer_dec (reg));
4607 }
4608 }
4609 }
4610
4611 /* Return true if it is appropriate to emit `return' instructions in the
4612 body of a function. */
4613
4614 bool
4615 sparc_can_use_return_insn_p (void)
4616 {
4617 return sparc_prologue_data_valid_p
4618 && (actual_fsize == 0 || !sparc_leaf_function_p);
4619 }
4620
4621 /* This function generates the assembly code for function exit. */
4622
4623 static void
4624 sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4625 {
4626 /* If the last two instructions of a function are "call foo; dslot;"
4627 the return address might point to the first instruction in the next
4628 function and we have to output a dummy nop for the sake of sane
4629 backtraces in such cases. This is pointless for sibling calls since
4630 the return address is explicitly adjusted. */
4631
4632 rtx insn, last_real_insn;
4633
4634 insn = get_last_insn ();
4635
4636 last_real_insn = prev_real_insn (insn);
4637 if (last_real_insn
4638 && GET_CODE (last_real_insn) == INSN
4639 && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
4640 last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);
4641
4642 if (last_real_insn
4643 && CALL_P (last_real_insn)
4644 && !SIBLING_CALL_P (last_real_insn))
    fputs ("\tnop\n", file);
4646
4647 sparc_output_deferred_case_vectors ();
4648 }
4649
4650 /* Output a 'restore' instruction. */
4651
4652 static void
4653 output_restore (rtx pat)
4654 {
4655 rtx operands[3];
4656
4657 if (! pat)
4658 {
4659 fputs ("\t restore\n", asm_out_file);
4660 return;
4661 }
4662
4663 gcc_assert (GET_CODE (pat) == SET);
4664
4665 operands[0] = SET_DEST (pat);
4666 pat = SET_SRC (pat);
4667
4668 switch (GET_CODE (pat))
4669 {
4670 case PLUS:
4671 operands[1] = XEXP (pat, 0);
4672 operands[2] = XEXP (pat, 1);
4673 output_asm_insn (" restore %r1, %2, %Y0", operands);
4674 break;
4675 case LO_SUM:
4676 operands[1] = XEXP (pat, 0);
4677 operands[2] = XEXP (pat, 1);
4678 output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
4679 break;
4680 case ASHIFT:
4681 operands[1] = XEXP (pat, 0);
4682 gcc_assert (XEXP (pat, 1) == const1_rtx);
4683 output_asm_insn (" restore %r1, %r1, %Y0", operands);
4684 break;
4685 default:
4686 operands[1] = pat;
4687 output_asm_insn (" restore %%g0, %1, %Y0", operands);
4688 break;
4689 }
4690 }
4691
4692 /* Output a return. */
4693
4694 const char *
4695 output_return (rtx insn)
4696 {
4697 if (sparc_leaf_function_p)
4698 {
4699 /* This is a leaf function so we don't have to bother restoring the
4700 register window, which frees us from dealing with the convoluted
4701 semantics of restore/return. We simply output the jump to the
4702 return address and the insn in the delay slot (if any). */
4703
4704 gcc_assert (! crtl->calls_eh_return);
4705
4706 return "jmp\t%%o7+%)%#";
4707 }
4708 else
4709 {
4710 /* This is a regular function so we have to restore the register window.
4711 We may have a pending insn for the delay slot, which will be either
4712 combined with the 'restore' instruction or put in the delay slot of
4713 the 'return' instruction. */
4714
4715 if (crtl->calls_eh_return)
4716 {
4717 /* If the function uses __builtin_eh_return, the eh_return
4718 machinery occupies the delay slot. */
4719 gcc_assert (! final_sequence);
4720
4721 if (! flag_delayed_branch)
4722 fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);
4723
4724 if (TARGET_V9)
4725 fputs ("\treturn\t%i7+8\n", asm_out_file);
4726 else
4727 fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);
4728
4729 if (flag_delayed_branch)
4730 fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
4731 else
4732 fputs ("\t nop\n", asm_out_file);
4733 }
4734 else if (final_sequence)
4735 {
4736 rtx delay, pat;
4737
4738 delay = NEXT_INSN (insn);
4739 gcc_assert (delay);
4740
4741 pat = PATTERN (delay);
4742
4743 if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
4744 {
4745 epilogue_renumber (&pat, 0);
4746 return "return\t%%i7+%)%#";
4747 }
4748 else
4749 {
4750 output_asm_insn ("jmp\t%%i7+%)", NULL);
4751 output_restore (pat);
4752 PATTERN (delay) = gen_blockage ();
4753 INSN_CODE (delay) = -1;
4754 }
4755 }
4756 else
4757 {
4758 /* The delay slot is empty. */
4759 if (TARGET_V9)
4760 return "return\t%%i7+%)\n\t nop";
4761 else if (flag_delayed_branch)
4762 return "jmp\t%%i7+%)\n\t restore";
4763 else
4764 return "restore\n\tjmp\t%%o7+%)\n\t nop";
4765 }
4766 }
4767
4768 return "";
4769 }
4770
4771 /* Output a sibling call. */
4772
4773 const char *
4774 output_sibcall (rtx insn, rtx call_operand)
4775 {
4776 rtx operands[1];
4777
4778 gcc_assert (flag_delayed_branch);
4779
4780 operands[0] = call_operand;
4781
4782 if (sparc_leaf_function_p)
4783 {
4784 /* This is a leaf function so we don't have to bother restoring the
4785 register window. We simply output the jump to the function and
4786 the insn in the delay slot (if any). */
4787
4788 gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));
4789
4790 if (final_sequence)
4791 output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
4792 operands);
4793 else
4794 /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
4795 it into branch if possible. */
4796 output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
4797 operands);
4798 }
4799 else
4800 {
4801 /* This is a regular function so we have to restore the register window.
4802 We may have a pending insn for the delay slot, which will be combined
4803 with the 'restore' instruction. */
4804
4805 output_asm_insn ("call\t%a0, 0", operands);
4806
4807 if (final_sequence)
4808 {
4809 rtx delay = NEXT_INSN (insn);
4810 gcc_assert (delay);
4811
4812 output_restore (PATTERN (delay));
4813
4814 PATTERN (delay) = gen_blockage ();
4815 INSN_CODE (delay) = -1;
4816 }
4817 else
4818 output_restore (NULL_RTX);
4819 }
4820
4821 return "";
4822 }
4823 \f
4824 /* Functions for handling argument passing.
4825
4826 For 32-bit, the first 6 args are normally in registers and the rest are
4827 pushed. Any arg that starts within the first 6 words is at least
4828 partially passed in a register unless its data type forbids.
4829
4830 For 64-bit, the argument registers are laid out as an array of 16 elements
4831 and arguments are added sequentially. The first 6 int args and up to the
4832 first 16 fp args (depending on size) are passed in regs.
4833
      Slot    Stack   Integral   Float   Float in structure   Double   Long Double
      ----    -----   --------   -----   ------------------   ------   -----------
       15   [SP+248]              %f31       %f30,%f31         %d30
       14   [SP+240]              %f29       %f28,%f29         %d28       %q28
       13   [SP+232]              %f27       %f26,%f27         %d26
       12   [SP+224]              %f25       %f24,%f25         %d24       %q24
       11   [SP+216]              %f23       %f22,%f23         %d22
       10   [SP+208]              %f21       %f20,%f21         %d20       %q20
        9   [SP+200]              %f19       %f18,%f19         %d18
        8   [SP+192]              %f17       %f16,%f17         %d16       %q16
        7   [SP+184]              %f15       %f14,%f15         %d14
        6   [SP+176]              %f13       %f12,%f13         %d12       %q12
        5   [SP+168]     %o5      %f11       %f10,%f11         %d10
        4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
        3   [SP+152]     %o3       %f7        %f6,%f7           %d6
        2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
        1   [SP+136]     %o1       %f3        %f2,%f3           %d2
        0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0
4852
4853 Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
4854
4855 Integral arguments are always passed as 64-bit quantities appropriately
4856 extended.
4857
4858 Passing of floating point values is handled as follows.
4859 If a prototype is in scope:
4860 If the value is in a named argument (i.e. not a stdarg function or a
4861 value not part of the `...') then the value is passed in the appropriate
4862 fp reg.
4863 If the value is part of the `...' and is passed in one of the first 6
4864 slots then the value is passed in the appropriate int reg.
4865 If the value is part of the `...' and is not passed in one of the first 6
4866 slots then the value is passed in memory.
4867 If a prototype is not in scope:
4868 If the value is one of the first 6 arguments the value is passed in the
4869 appropriate integer reg and the appropriate fp reg.
4870 If the value is not one of the first 6 arguments the value is passed in
4871 the appropriate fp reg and in memory.
4872
4873
4874 Summary of the calling conventions implemented by GCC on the SPARC:
4875
   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory         memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory         memory

      aggregate                 any        memory         memory



   64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory         memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory         memory

      aggregate                <=16         reg.           reg.
      aggregate            16<s<=32        memory          reg.
      aggregate                 >32        memory         memory
4934
4935
4936
4937 Note #1: complex floating-point types follow the extended SPARC ABIs as
4938 implemented by the Sun compiler.
4939
4940 Note #2: integral vector types follow the scalar floating-point types
4941 conventions to match what is implemented by the Sun VIS SDK.
4942
4943 Note #3: floating-point vector types follow the aggregate types
4944 conventions. */
4945
4946
4947 /* Maximum number of int regs for args. */
4948 #define SPARC_INT_ARG_MAX 6
4949 /* Maximum number of fp regs for args. */
4950 #define SPARC_FP_ARG_MAX 16
4951
4952 #define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
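
/* E.g. ROUND_ADVANCE (12) is 2 slots on 64-bit (UNITS_PER_WORD == 8)
   and 3 slots on 32-bit (UNITS_PER_WORD == 4).  */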
4953
4954 /* Handle the INIT_CUMULATIVE_ARGS macro.
4955 Initialize a variable CUM of type CUMULATIVE_ARGS
4956 for a call to a function whose data type is FNTYPE.
4957 For a library call, FNTYPE is 0. */
4958
4959 void
4960 init_cumulative_args (struct sparc_args *cum, tree fntype,
4961 rtx libname ATTRIBUTE_UNUSED,
4962 tree fndecl ATTRIBUTE_UNUSED)
4963 {
4964 cum->words = 0;
4965 cum->prototype_p = fntype && prototype_p (fntype);
4966 cum->libcall_p = fntype == 0;
4967 }
4968
4969 /* Handle promotion of pointer and integer arguments. */
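
/* For example (illustrative), a QImode char argument is widened to
   word_mode, i.e. SImode on 32-bit and DImode on 64-bit, while pointers
   are extended to Pmode.  */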
4970
4971 static enum machine_mode
4972 sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
4973 enum machine_mode mode,
4974 int *punsignedp ATTRIBUTE_UNUSED,
4975 const_tree fntype ATTRIBUTE_UNUSED,
4976 int for_return ATTRIBUTE_UNUSED)
4977 {
4978 if (POINTER_TYPE_P (type))
4979 {
4980 *punsignedp = POINTERS_EXTEND_UNSIGNED;
4981 return Pmode;
4982 }
4983
4984 /* Integral arguments are passed as full words, as per the ABI. */
4985 if (GET_MODE_CLASS (mode) == MODE_INT
4986 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4987 return word_mode;
4988
4989 return mode;
4990 }
4991
4992 /* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook. */
4993
4994 static bool
4995 sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4996 {
4997 return TARGET_ARCH64 ? true : false;
4998 }
4999
5000 /* Scan the record type TYPE and return the following predicates:
5001 - INTREGS_P: the record contains at least one field or sub-field
5002 that is eligible for promotion in integer registers.
5003 - FP_REGS_P: the record contains at least one field or sub-field
5004 that is eligible for promotion in floating-point registers.
5005 - PACKED_P: the record contains at least one field that is packed.
5006
5007 Sub-fields are not taken into account for the PACKED_P predicate. */
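
/* E.g. (illustrative) struct { float f; int i; } sets both *FPREGS_P and
   *INTREGS_P, and additionally sets *PACKED_P if either field is packed.  */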
5008
5009 static void
5010 scan_record_type (const_tree type, int *intregs_p, int *fpregs_p,
5011 int *packed_p)
5012 {
5013 tree field;
5014
5015 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5016 {
5017 if (TREE_CODE (field) == FIELD_DECL)
5018 {
5019 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5020 scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
5021 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5022 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5023 && TARGET_FPU)
5024 *fpregs_p = 1;
5025 else
5026 *intregs_p = 1;
5027
5028 if (packed_p && DECL_PACKED (field))
5029 *packed_p = 1;
5030 }
5031 }
5032 }
5033
5034 /* Compute the slot number to pass an argument in.
5035 Return the slot number or -1 if passing on the stack.
5036
5037 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5038 the preceding args and about the function being called.
5039 MODE is the argument's machine mode.
5040 TYPE is the data type of the argument (as a tree).
5041 This is null for libcalls where that information may
5042 not be available.
5043 NAMED is nonzero if this argument is a named parameter
5044 (otherwise it is an extra parameter matching an ellipsis).
5045 INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
5046 *PREGNO records the register number to use if scalar type.
5047 *PPADDING records the amount of padding needed in words. */
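
/* E.g. (illustrative) on 64-bit, a named double in slot 2 is assigned
   register SPARC_FP_ARG_FIRST + 4, i.e. %d4, while a float in the same
   slot gets the right-justified half %f5, matching the table above.  */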
5048
5049 static int
5050 function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
5051 const_tree type, bool named, bool incoming_p,
5052 int *pregno, int *ppadding)
5053 {
5054 int regbase = (incoming_p
5055 ? SPARC_INCOMING_INT_ARG_FIRST
5056 : SPARC_OUTGOING_INT_ARG_FIRST);
5057 int slotno = cum->words;
5058 enum mode_class mclass;
5059 int regno;
5060
5061 *ppadding = 0;
5062
5063 if (type && TREE_ADDRESSABLE (type))
5064 return -1;
5065
5066 if (TARGET_ARCH32
5067 && mode == BLKmode
5068 && type
5069 && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
5070 return -1;
5071
5072 /* For SPARC64, objects requiring 16-byte alignment get it. */
5073 if (TARGET_ARCH64
5074 && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
5075 && (slotno & 1) != 0)
5076 slotno++, *ppadding = 1;
5077
5078 mclass = GET_MODE_CLASS (mode);
5079 if (type && TREE_CODE (type) == VECTOR_TYPE)
5080 {
5081 /* Vector types deserve special treatment because they are
5082 polymorphic wrt their mode, depending upon whether VIS
5083 instructions are enabled. */
5084 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5085 {
5086 /* The SPARC port defines no floating-point vector modes. */
5087 gcc_assert (mode == BLKmode);
5088 }
5089 else
5090 {
5091 /* Integral vector types should either have a vector
5092 mode or an integral mode, because we are guaranteed
5093 by pass_by_reference that their size is not greater
5094 than 16 bytes and TImode is 16-byte wide. */
5095 gcc_assert (mode != BLKmode);
5096
5097 /* Vector integers are handled like floats according to
5098 the Sun VIS SDK. */
5099 mclass = MODE_FLOAT;
5100 }
5101 }
5102
5103 switch (mclass)
5104 {
5105 case MODE_FLOAT:
5106 case MODE_COMPLEX_FLOAT:
5107 case MODE_VECTOR_INT:
5108 if (TARGET_ARCH64 && TARGET_FPU && named)
5109 {
5110 if (slotno >= SPARC_FP_ARG_MAX)
5111 return -1;
5112 regno = SPARC_FP_ARG_FIRST + slotno * 2;
5113 /* Arguments filling only one single FP register are
5114 right-justified in the outer double FP register. */
5115 if (GET_MODE_SIZE (mode) <= 4)
5116 regno++;
5117 break;
5118 }
5119 /* fallthrough */
5120
5121 case MODE_INT:
5122 case MODE_COMPLEX_INT:
5123 if (slotno >= SPARC_INT_ARG_MAX)
5124 return -1;
5125 regno = regbase + slotno;
5126 break;
5127
5128 case MODE_RANDOM:
5129 if (mode == VOIDmode)
5130 /* MODE is VOIDmode when generating the actual call. */
5131 return -1;
5132
5133 gcc_assert (mode == BLKmode);
5134
5135 if (TARGET_ARCH32
5136 || !type
5137 || (TREE_CODE (type) != VECTOR_TYPE
5138 && TREE_CODE (type) != RECORD_TYPE))
5139 {
5140 if (slotno >= SPARC_INT_ARG_MAX)
5141 return -1;
5142 regno = regbase + slotno;
5143 }
5144 else /* TARGET_ARCH64 && type */
5145 {
5146 int intregs_p = 0, fpregs_p = 0, packed_p = 0;
5147
5148 /* First see what kinds of registers we would need. */
5149 if (TREE_CODE (type) == VECTOR_TYPE)
5150 fpregs_p = 1;
5151 else
5152 scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
5153
5154 /* The ABI obviously doesn't specify how packed structures
5155 are passed. These are defined to be passed in int regs
5156 if possible, otherwise memory. */
5157 if (packed_p || !named)
5158 fpregs_p = 0, intregs_p = 1;
5159
5160 /* If all arg slots are filled, then must pass on stack. */
5161 if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
5162 return -1;
5163
5164 /* If there are only int args and all int arg slots are filled,
5165 then must pass on stack. */
5166 if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
5167 return -1;
5168
5169 /* Note that even if all int arg slots are filled, fp members may
5170 still be passed in regs if such regs are available.
5171 *PREGNO isn't set because there may be more than one, it's up
5172 to the caller to compute them. */
5173 return slotno;
5174 }
5175 break;
5176
5177 default :
5178 gcc_unreachable ();
5179 }
5180
5181 *pregno = regno;
5182 return slotno;
5183 }
5184
5185 /* Handle recursive register counting for structure field layout. */
5186
5187 struct function_arg_record_value_parms
5188 {
5189 rtx ret; /* return expression being built. */
5190 int slotno; /* slot number of the argument. */
5191 int named; /* whether the argument is named. */
5192 int regbase; /* regno of the base register. */
5193 int stack; /* 1 if part of the argument is on the stack. */
5194 int intoffset; /* offset of the first pending integer field. */
5195 unsigned int nregs; /* number of words passed in registers. */
5196 };
5197
5198 static void function_arg_record_value_3
5199 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
5200 static void function_arg_record_value_2
5201 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5202 static void function_arg_record_value_1
5203 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
5204 static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
5205 static rtx function_arg_union_value (int, enum machine_mode, int, int);
5206
5207 /* A subroutine of function_arg_record_value. Traverse the structure
5208 recursively and determine how many registers will be required. */
5209
5210 static void
5211 function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
5212 struct function_arg_record_value_parms *parms,
5213 bool packed_p)
5214 {
5215 tree field;
5216
5217 /* We need to compute how many registers are needed so we can
5218 allocate the PARALLEL but before we can do that we need to know
5219 whether there are any packed fields. The ABI obviously doesn't
5220 specify how structures are passed in this case, so they are
5221 defined to be passed in int regs if possible, otherwise memory,
5222 regardless of whether there are fp values present. */
5223
5224 if (! packed_p)
    for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5226 {
5227 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5228 {
5229 packed_p = true;
5230 break;
5231 }
5232 }
5233
5234 /* Compute how many registers we need. */
5235 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5236 {
5237 if (TREE_CODE (field) == FIELD_DECL)
5238 {
5239 HOST_WIDE_INT bitpos = startbitpos;
5240
5241 if (DECL_SIZE (field) != 0)
5242 {
5243 if (integer_zerop (DECL_SIZE (field)))
5244 continue;
5245
5246 if (host_integerp (bit_position (field), 1))
5247 bitpos += int_bit_position (field);
5248 }
5249
5250 /* ??? FIXME: else assume zero offset. */
5251
5252 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5253 function_arg_record_value_1 (TREE_TYPE (field),
5254 bitpos,
5255 parms,
5256 packed_p);
5257 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5258 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5259 && TARGET_FPU
5260 && parms->named
5261 && ! packed_p)
5262 {
5263 if (parms->intoffset != -1)
5264 {
5265 unsigned int startbit, endbit;
5266 int intslots, this_slotno;
5267
5268 startbit = parms->intoffset & -BITS_PER_WORD;
5269 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5270
5271 intslots = (endbit - startbit) / BITS_PER_WORD;
5272 this_slotno = parms->slotno + parms->intoffset
5273 / BITS_PER_WORD;
5274
5275 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5276 {
5277 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5278 /* We need to pass this field on the stack. */
5279 parms->stack = 1;
5280 }
5281
5282 parms->nregs += intslots;
5283 parms->intoffset = -1;
5284 }
5285
	  /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
5287 If it wasn't true we wouldn't be here. */
5288 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5289 && DECL_MODE (field) == BLKmode)
5290 parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5291 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5292 parms->nregs += 2;
5293 else
5294 parms->nregs += 1;
5295 }
5296 else
5297 {
5298 if (parms->intoffset == -1)
5299 parms->intoffset = bitpos;
5300 }
5301 }
5302 }
5303 }
5304
5305 /* A subroutine of function_arg_record_value. Assign the bits of the
5306 structure between parms->intoffset and bitpos to integer registers. */
5307
5308 static void
5309 function_arg_record_value_3 (HOST_WIDE_INT bitpos,
5310 struct function_arg_record_value_parms *parms)
5311 {
5312 enum machine_mode mode;
5313 unsigned int regno;
5314 unsigned int startbit, endbit;
5315 int this_slotno, intslots, intoffset;
5316 rtx reg;
5317
5318 if (parms->intoffset == -1)
5319 return;
5320
5321 intoffset = parms->intoffset;
5322 parms->intoffset = -1;
5323
5324 startbit = intoffset & -BITS_PER_WORD;
5325 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5326 intslots = (endbit - startbit) / BITS_PER_WORD;
5327 this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
5328
5329 intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
5330 if (intslots <= 0)
5331 return;
5332
5333 /* If this is the trailing part of a word, only load that much into
5334 the register. Otherwise load the whole register. Note that in
5335 the latter case we may pick up unwanted bits. It's not a problem
   at the moment, but we may wish to revisit it.  */
5337
5338 if (intoffset % BITS_PER_WORD != 0)
5339 mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5340 MODE_INT);
5341 else
5342 mode = word_mode;
5343
5344 intoffset /= BITS_PER_UNIT;
5345 do
5346 {
5347 regno = parms->regbase + this_slotno;
5348 reg = gen_rtx_REG (mode, regno);
5349 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5350 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5351
5352 this_slotno += 1;
5353 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5354 mode = word_mode;
5355 parms->nregs += 1;
5356 intslots -= 1;
5357 }
5358 while (intslots > 0);
5359 }
5360
5361 /* A subroutine of function_arg_record_value. Traverse the structure
5362 recursively and assign bits to floating point registers. Track which
5363 bits in between need integer registers; invoke function_arg_record_value_3
5364 to make that happen. */
5365
5366 static void
5367 function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5368 struct function_arg_record_value_parms *parms,
5369 bool packed_p)
5370 {
5371 tree field;
5372
5373 if (! packed_p)
5374 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5375 {
5376 if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5377 {
5378 packed_p = true;
5379 break;
5380 }
5381 }
5382
5383 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5384 {
5385 if (TREE_CODE (field) == FIELD_DECL)
5386 {
5387 HOST_WIDE_INT bitpos = startbitpos;
5388
5389 if (DECL_SIZE (field) != 0)
5390 {
5391 if (integer_zerop (DECL_SIZE (field)))
5392 continue;
5393
5394 if (host_integerp (bit_position (field), 1))
5395 bitpos += int_bit_position (field);
5396 }
5397
5398 /* ??? FIXME: else assume zero offset. */
5399
5400 if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5401 function_arg_record_value_2 (TREE_TYPE (field),
5402 bitpos,
5403 parms,
5404 packed_p);
5405 else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5406 || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5407 && TARGET_FPU
5408 && parms->named
5409 && ! packed_p)
5410 {
5411 int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5412 int regno, nregs, pos;
5413 enum machine_mode mode = DECL_MODE (field);
5414 rtx reg;
5415
5416 function_arg_record_value_3 (bitpos, parms);
5417
5418 if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5419 && mode == BLKmode)
5420 {
5421 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5422 nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5423 }
5424 else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5425 {
5426 mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5427 nregs = 2;
5428 }
5429 else
5430 nregs = 1;
5431
5432 regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5433 if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5434 regno++;
5435 reg = gen_rtx_REG (mode, regno);
5436 pos = bitpos / BITS_PER_UNIT;
5437 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5438 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5439 parms->nregs += 1;
5440 while (--nregs > 0)
5441 {
5442 regno += GET_MODE_SIZE (mode) / 4;
5443 reg = gen_rtx_REG (mode, regno);
5444 pos += GET_MODE_SIZE (mode);
5445 XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5446 = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5447 parms->nregs += 1;
5448 }
5449 }
5450 else
5451 {
5452 if (parms->intoffset == -1)
5453 parms->intoffset = bitpos;
5454 }
5455 }
5456 }
5457 }
5458
5459 /* Used by function_arg and sparc_function_value_1 to implement the complex
5460 conventions of the 64-bit ABI for passing and returning structures.
5461 Return an expression valid as a return value for the FUNCTION_ARG
5462 and TARGET_FUNCTION_VALUE.
5463
5464 TYPE is the data type of the argument (as a tree).
5465 This is null for libcalls where that information may
5466 not be available.
5467 MODE is the argument's machine mode.
5468 SLOTNO is the index number of the argument's slot in the parameter array.
5469 NAMED is nonzero if this argument is a named parameter
5470 (otherwise it is an extra parameter matching an ellipsis).
5471 REGBASE is the regno of the base register for the parameter array. */
5472
5473 static rtx
5474 function_arg_record_value (const_tree type, enum machine_mode mode,
5475 int slotno, int named, int regbase)
5476 {
5477 HOST_WIDE_INT typesize = int_size_in_bytes (type);
5478 struct function_arg_record_value_parms parms;
5479 unsigned int nregs;
5480
5481 parms.ret = NULL_RTX;
5482 parms.slotno = slotno;
5483 parms.named = named;
5484 parms.regbase = regbase;
5485 parms.stack = 0;
5486
5487 /* Compute how many registers we need. */
5488 parms.nregs = 0;
5489 parms.intoffset = 0;
5490 function_arg_record_value_1 (type, 0, &parms, false);
5491
5492 /* Take into account pending integer fields. */
5493 if (parms.intoffset != -1)
5494 {
5495 unsigned int startbit, endbit;
5496 int intslots, this_slotno;
5497
5498 startbit = parms.intoffset & -BITS_PER_WORD;
5499 endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5500 intslots = (endbit - startbit) / BITS_PER_WORD;
5501 this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5502
5503 if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5504 {
5505 intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5506 /* We need to pass this field on the stack. */
5507 parms.stack = 1;
5508 }
5509
5510 parms.nregs += intslots;
5511 }
5512 nregs = parms.nregs;
5513
5514 /* Allocate the vector and handle some annoying special cases. */
5515 if (nregs == 0)
5516 {
5517 /* ??? Empty structure has no value? Duh? */
5518 if (typesize <= 0)
5519 {
5520 /* Though there's nothing really to store, return a word register
5521 anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
5522 leads to breakage due to the fact that there are zero bytes to
5523 load. */
5524 return gen_rtx_REG (mode, regbase);
5525 }
5526 else
5527 {
5528 /* ??? C++ has structures with no fields, and yet a size. Give up
5529 for now and pass everything back in integer registers. */
5530 nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5531 }
5532 if (nregs + slotno > SPARC_INT_ARG_MAX)
5533 nregs = SPARC_INT_ARG_MAX - slotno;
5534 }
5535 gcc_assert (nregs != 0);
5536
5537 parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5538
5539 /* If at least one field must be passed on the stack, generate
5540 (parallel [(expr_list (nil) ...) ...]) so that all fields will
5541 also be passed on the stack. We can't do much better because the
5542 semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5543 of structures for which the fields passed exclusively in registers
5544 are not at the beginning of the structure. */
5545 if (parms.stack)
5546 XVECEXP (parms.ret, 0, 0)
5547 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5548
5549 /* Fill in the entries. */
5550 parms.nregs = 0;
5551 parms.intoffset = 0;
5552 function_arg_record_value_2 (type, 0, &parms, false);
5553 function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5554
5555 gcc_assert (parms.nregs == nregs);
5556
5557 return parms.ret;
5558 }
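
/* Illustrative sketch (not in the original source): the rounding
   arithmetic used above to turn a pending run of integer fields into a
   count of 64-bit argument slots.  Plain ints stand in for
   HOST_WIDE_INT; the helper name is hypothetical.  */
#if 0
static int
example_int_slots (int intoffset, int typesize_bytes)
{
  const int word_bits = 64;                          /* BITS_PER_WORD */
  int startbit = intoffset & -word_bits;             /* round down */
  int endbit = (typesize_bytes * 8 + word_bits - 1) & -word_bits;
  return (endbit - startbit) / word_bits;            /* slots needed */
}

/* e.g. for a 12-byte record whose integer tail starts at bit 64,
   example_int_slots (64, 12) == 1: one more integer slot.  */
#endif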
5559
5560 /* Used by sparc_function_arg_1 and sparc_function_value_1 to implement
5561 the conventions of the 64-bit ABI for passing and returning unions.
5562 Return an expression valid as a return value for the TARGET_FUNCTION_ARG
5563 and TARGET_FUNCTION_VALUE target hooks.
5564 
5565 SIZE is the size in bytes of the union.  MODE is the argument's mode.
5566 SLOTNO is the index number of the argument's slot in the parameter array.
5567 REGNO is the hard register the union will be passed in. */
5568
5569 static rtx
5570 function_arg_union_value (int size, enum machine_mode mode, int slotno,
5571 int regno)
5572 {
5573 int nwords = ROUND_ADVANCE (size), i;
5574 rtx regs;
5575
5576 /* See comment in previous function for empty structures. */
5577 if (nwords == 0)
5578 return gen_rtx_REG (mode, regno);
5579
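/* A union starting in the last argument slot has only its first word
   in a register; the remainder is passed on the stack.  */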
5580 if (slotno == SPARC_INT_ARG_MAX - 1)
5581 nwords = 1;
5582
5583 regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5584
5585 for (i = 0; i < nwords; i++)
5586 {
5587 /* Unions are passed left-justified. */
5588 XVECEXP (regs, 0, i)
5589 = gen_rtx_EXPR_LIST (VOIDmode,
5590 gen_rtx_REG (word_mode, regno),
5591 GEN_INT (UNITS_PER_WORD * i));
5592 regno++;
5593 }
5594
5595 return regs;
5596 }
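
/* Illustrative sketch (hypothetical helper, not in the source): the
   register/offset pairs built above for a union of SIZE bytes starting
   in integer register REGNO on V9.  Each successive word register
   covers the next 8 bytes from offset 0, i.e. left-justified.  */
#if 0
static void
example_union_layout (int size, int regno, int reg_out[], int off_out[])
{
  int i, nwords = (size + 7) / 8;     /* ROUND_ADVANCE on 64-bit */
  for (i = 0; i < nwords; i++)
    {
      reg_out[i] = regno + i;         /* next word register */
      off_out[i] = 8 * i;             /* left-justified byte offset */
    }
}
/* A 16-byte union starting in %o0 (regno 8) yields the pairs
   (reg 8, offset 0) and (reg 9, offset 8).  */
#endif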
5597
5598 /* Used by sparc_function_arg_1 and sparc_function_value_1 to implement
5599 the conventions for passing and returning large (BLKmode) vectors.
5600 Return an expression valid as a return value for the TARGET_FUNCTION_ARG
5601 and TARGET_FUNCTION_VALUE target hooks.
5602
5603 SIZE is the size in bytes of the vector (at least 8 bytes).
5604 REGNO is the FP hard register the vector will be passed in. */
5605
5606 static rtx
5607 function_arg_vector_value (int size, int regno)
5608 {
5609 int i, nregs = size / 8;
5610 rtx regs;
5611
5612 regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5613
5614 for (i = 0; i < nregs; i++)
5615 {
5616 XVECEXP (regs, 0, i)
5617 = gen_rtx_EXPR_LIST (VOIDmode,
5618 gen_rtx_REG (DImode, regno + 2*i),
5619 GEN_INT (i*8));
5620 }
5621
5622 return regs;
5623 }
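
/* Illustrative sketch (hypothetical, not in the source): the PARALLEL
   built above splits a BLKmode vector into DImode pieces.  The FP
   register number advances by 2 per piece because FP registers are
   counted in 4-byte quantities.  */
#if 0
static void
example_vector_pieces (int size, int regno, int reg_out[], int off_out[])
{
  int i, nregs = size / 8;            /* one DImode piece per 8 bytes */
  for (i = 0; i < nregs; i++)
    {
      reg_out[i] = regno + 2 * i;     /* %f0, %f2, ... as doubles */
      off_out[i] = 8 * i;
    }
}
#endif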
5624
5625 /* Determine where to put an argument to a function.
5626 Value is zero to push the argument on the stack,
5627 or a hard register in which to store the argument.
5628
5629 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5630 the preceding args and about the function being called.
5631 MODE is the argument's machine mode.
5632 TYPE is the data type of the argument (as a tree).
5633 This is null for libcalls where that information may
5634 not be available.
5635 NAMED is true if this argument is a named parameter
5636 (otherwise it is an extra parameter matching an ellipsis).
5637 INCOMING_P is false for TARGET_FUNCTION_ARG, true for
5638 TARGET_FUNCTION_INCOMING_ARG. */
5639
5640 static rtx
5641 sparc_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
5642 const_tree type, bool named, bool incoming_p)
5643 {
5644 int regbase = (incoming_p
5645 ? SPARC_INCOMING_INT_ARG_FIRST
5646 : SPARC_OUTGOING_INT_ARG_FIRST);
5647 int slotno, regno, padding;
5648 enum mode_class mclass = GET_MODE_CLASS (mode);
5649
5650 slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5651 &regno, &padding);
5652 if (slotno == -1)
5653 return 0;
5654
5655 /* Vector types deserve special treatment because they are polymorphic wrt
5656 their mode, depending upon whether VIS instructions are enabled. */
5657 if (type && TREE_CODE (type) == VECTOR_TYPE)
5658 {
5659 HOST_WIDE_INT size = int_size_in_bytes (type);
5660 gcc_assert ((TARGET_ARCH32 && size <= 8)
5661 || (TARGET_ARCH64 && size <= 16));
5662
5663 if (mode == BLKmode)
5664 return function_arg_vector_value (size,
5665 SPARC_FP_ARG_FIRST + 2*slotno);
5666 else
5667 mclass = MODE_FLOAT;
5668 }
5669
5670 if (TARGET_ARCH32)
5671 return gen_rtx_REG (mode, regno);
5672
5673 /* Structures up to 16 bytes in size are passed in arg slots on the stack
5674 and are promoted to registers if possible. */
5675 if (type && TREE_CODE (type) == RECORD_TYPE)
5676 {
5677 HOST_WIDE_INT size = int_size_in_bytes (type);
5678 gcc_assert (size <= 16);
5679
5680 return function_arg_record_value (type, mode, slotno, named, regbase);
5681 }
5682
5683 /* Unions up to 16 bytes in size are passed in integer registers. */
5684 else if (type && TREE_CODE (type) == UNION_TYPE)
5685 {
5686 HOST_WIDE_INT size = int_size_in_bytes (type);
5687 gcc_assert (size <= 16);
5688
5689 return function_arg_union_value (size, mode, slotno, regno);
5690 }
5691
5692 /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5693 but also have the slot allocated for them.
5694 If no prototype is in scope fp values in register slots get passed
5695 in two places, either fp regs and int regs or fp regs and memory. */
5696 else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5697 && SPARC_FP_REG_P (regno))
5698 {
5699 rtx reg = gen_rtx_REG (mode, regno);
5700 if (cum->prototype_p || cum->libcall_p)
5701 {
5702 /* "* 2" because fp reg numbers are recorded in 4 byte
5703 quantities. */
5704 #if 0
5705 /* ??? This will cause the value to be passed in the fp reg and
5706 in the stack. When a prototype exists we want to pass the
5707 value in the reg but reserve space on the stack. That's an
5708 optimization, and is deferred [for a bit]. */
5709 if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5710 return gen_rtx_PARALLEL (mode,
5711 gen_rtvec (2,
5712 gen_rtx_EXPR_LIST (VOIDmode,
5713 NULL_RTX, const0_rtx),
5714 gen_rtx_EXPR_LIST (VOIDmode,
5715 reg, const0_rtx)));
5716 else
5717 #else
5718 /* ??? It seems that passing back a register even when past
5719 the area declared by REG_PARM_STACK_SPACE will allocate
5720 space appropriately, and will not copy the data onto the
5721 stack, exactly as we desire.
5722
5723 This is due to locate_and_pad_parm being called in
5724 expand_call whenever reg_parm_stack_space > 0, which
5725 while beneficial to our example here, would seem to be
5726 in error from what had been intended. Ho hum... -- r~ */
5727 #endif
5728 return reg;
5729 }
5730 else
5731 {
5732 rtx v0, v1;
5733
5734 if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5735 {
5736 int intreg;
5737
5738 /* On the incoming side, we need not record that the value
5739 is passed in both %f0 and %i0; doing so confuses other parts
5740 and causes needless spillage even in the simplest cases. */
5741 if (incoming_p)
5742 return reg;
5743
5744 intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5745 + (regno - SPARC_FP_ARG_FIRST) / 2);
5746
5747 v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5748 v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5749 const0_rtx);
5750 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5751 }
5752 else
5753 {
5754 v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5755 v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5756 return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5757 }
5758 }
5759 }
5760
5761 /* All other aggregate types are passed in an integer register in a mode
5762 corresponding to the size of the type. */
5763 else if (type && AGGREGATE_TYPE_P (type))
5764 {
5765 HOST_WIDE_INT size = int_size_in_bytes (type);
5766 gcc_assert (size <= 16);
5767
5768 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5769 }
5770
5771 return gen_rtx_REG (mode, regno);
5772 }
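
/* Illustrative sketch (hypothetical, not in the source): the shadow
   integer register computed above for an unprototyped call on V9.  A
   float argument in one of the first six slots is described twice, in
   its FP register and in the matching %o register, so either
   convention works on the callee side.  */
#if 0
static int
example_shadow_int_reg (int fp_regno)
{
  /* SPARC_FP_ARG_FIRST is 32 and SPARC_OUTGOING_INT_ARG_FIRST is 8
     (%o0); FP regnos count 4-byte quantities, hence the / 2.  */
  return 8 + (fp_regno - 32) / 2;
}
/* A double in %f2 (regno 34) is shadowed in %o1 (regno 9).  */
#endif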
5773
5774 /* Handle the TARGET_FUNCTION_ARG target hook. */
5775
5776 static rtx
5777 sparc_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5778 const_tree type, bool named)
5779 {
5780 return sparc_function_arg_1 (cum, mode, type, named, false);
5781 }
5782
5783 /* Handle the TARGET_FUNCTION_INCOMING_ARG target hook. */
5784
5785 static rtx
5786 sparc_function_incoming_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5787 const_tree type, bool named)
5788 {
5789 return sparc_function_arg_1 (cum, mode, type, named, true);
5790 }
5791
5792 /* For sparc64, objects requiring 16-byte alignment are passed with that alignment. */
5793
5794 static unsigned int
5795 sparc_function_arg_boundary (enum machine_mode mode, const_tree type)
5796 {
5797 return ((TARGET_ARCH64
5798 && (GET_MODE_ALIGNMENT (mode) == 128
5799 || (type && TYPE_ALIGN (type) == 128)))
5800 ? 128
5801 : PARM_BOUNDARY);
5802 }
5803
5804 /* For an arg passed partly in registers and partly in memory,
5805 this is the number of bytes of registers used.
5806 For args passed entirely in registers or entirely in memory, zero.
5807
5808 Any arg that starts in the first 6 regs but won't entirely fit in them
5809 needs partial registers on v8. On v9, structures with integer
5810 values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5811 values that begin in the last fp reg [where "last fp reg" varies with the
5812 mode] will be split between that reg and memory. */
5813
5814 static int
5815 sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5816 tree type, bool named)
5817 {
5818 int slotno, regno, padding;
5819
5820 /* We pass false for incoming_p here; it doesn't matter. */
5821 slotno = function_arg_slotno (cum, mode, type, named, false,
5822 &regno, &padding);
5823
5824 if (slotno == -1)
5825 return 0;
5826
5827 if (TARGET_ARCH32)
5828 {
5829 if ((slotno + (mode == BLKmode
5830 ? ROUND_ADVANCE (int_size_in_bytes (type))
5831 : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5832 > SPARC_INT_ARG_MAX)
5833 return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5834 }
5835 else
5836 {
5837 /* We are guaranteed by pass_by_reference that the size of the
5838 argument is not greater than 16 bytes, so we only need to return
5839 one word if the argument is partially passed in registers. */
5840
5841 if (type && AGGREGATE_TYPE_P (type))
5842 {
5843 int size = int_size_in_bytes (type);
5844
5845 if (size > UNITS_PER_WORD
5846 && slotno == SPARC_INT_ARG_MAX - 1)
5847 return UNITS_PER_WORD;
5848 }
5849 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5850 || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5851 && ! (TARGET_FPU && named)))
5852 {
5853 /* The complex types are passed as packed types. */
5854 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5855 && slotno == SPARC_INT_ARG_MAX - 1)
5856 return UNITS_PER_WORD;
5857 }
5858 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5859 {
5860 if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5861 > SPARC_FP_ARG_MAX)
5862 return UNITS_PER_WORD;
5863 }
5864 }
5865
5866 return 0;
5867 }
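
/* Worked example (illustrative, not in the source): on V9 a 16-byte
   aggregate that lands in the last slot (slotno == SPARC_INT_ARG_MAX - 1)
   gets its first 8 bytes in %o5 and the rest in memory, so the hook
   reports one word.  The 64-bit aggregate branch above, in plain ints: */
#if 0
static int
example_partial_bytes_v9 (int size, int slotno)
{
  if (size > 8 && slotno == 5)        /* SPARC_INT_ARG_MAX - 1 */
    return 8;                         /* UNITS_PER_WORD in registers */
  return 0;                           /* all in regs or all in memory */
}
#endif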
5868
5869 /* Handle the TARGET_PASS_BY_REFERENCE target hook.
5870 Specify whether to pass the argument by reference. */
5871
5872 static bool
5873 sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5874 enum machine_mode mode, const_tree type,
5875 bool named ATTRIBUTE_UNUSED)
5876 {
5877 if (TARGET_ARCH32)
5878 /* Original SPARC 32-bit ABI says that structures and unions,
5879 and quad-precision floats are passed by reference. For Pascal,
5880 also pass arrays by reference. All other base types are passed
5881 in registers.
5882
5883 Extended ABI (as implemented by the Sun compiler) says that all
5884 complex floats are passed by reference. Pass complex integers
5885 in registers up to 8 bytes. More generally, enforce the 2-word
5886 cap for passing arguments in registers.
5887
5888 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5889 integers are passed like floats of the same size, that is in
5890 registers up to 8 bytes. Pass all vector floats by reference
5891 like structures and unions. */
5892 return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5893 || mode == SCmode
5894 /* Catch CDImode, TFmode, DCmode and TCmode. */
5895 || GET_MODE_SIZE (mode) > 8
5896 || (type
5897 && TREE_CODE (type) == VECTOR_TYPE
5898 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5899 else
5900 /* Original SPARC 64-bit ABI says that structures and unions
5901 smaller than 16 bytes are passed in registers, as well as
5902 all other base types.
5903
5904 Extended ABI (as implemented by the Sun compiler) says that
5905 complex floats are passed in registers up to 16 bytes. Pass
5906 all complex integers in registers up to 16 bytes. More generally,
5907 enforce the 2-word cap for passing arguments in registers.
5908
5909 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5910 integers are passed like floats of the same size, that is in
5911 registers (up to 16 bytes). Pass all vector floats like structures
5912 and unions. */
5913 return ((type
5914 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5915 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5916 /* Catch CTImode and TCmode. */
5917 || GET_MODE_SIZE (mode) > 16);
5918 }
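
/* Illustrative sketch (hypothetical, with plain flags instead of trees
   and modes): the 32-bit rule above as a single predicate.  Vector
   integers wider than 8 bytes also go by reference; the sketch folds
   that into mode_size for brevity.  */
#if 0
static int
example_pass_by_ref_v8 (int is_aggregate, int is_vector_float,
			int is_scmode, int mode_size)
{
  return is_aggregate || is_vector_float
	 || is_scmode                 /* _Complex float, 8 bytes */
	 || mode_size > 8;            /* TFmode, CDImode, DCmode, TCmode */
}
#endif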
5919
5920 /* Handle the TARGET_FUNCTION_ARG_ADVANCE hook.
5921 Update the data in CUM to advance over an argument
5922 of mode MODE and data type TYPE.
5923 TYPE is null for libcalls where that information may not be available. */
5924
5925 static void
5926 sparc_function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5927 const_tree type, bool named)
5928 {
5929 int regno, padding;
5930
5931 /* We pass false for incoming_p here; it doesn't matter. */
5932 function_arg_slotno (cum, mode, type, named, false, &regno, &padding);
5933
5934 /* If argument requires leading padding, add it. */
5935 cum->words += padding;
5936
5937 if (TARGET_ARCH32)
5938 {
5939 cum->words += (mode != BLKmode
5940 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5941 : ROUND_ADVANCE (int_size_in_bytes (type)));
5942 }
5943 else
5944 {
5945 if (type && AGGREGATE_TYPE_P (type))
5946 {
5947 int size = int_size_in_bytes (type);
5948
5949 if (size <= 8)
5950 ++cum->words;
5951 else if (size <= 16)
5952 cum->words += 2;
5953 else /* passed by reference */
5954 ++cum->words;
5955 }
5956 else
5957 {
5958 cum->words += (mode != BLKmode
5959 ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5960 : ROUND_ADVANCE (int_size_in_bytes (type)));
5961 }
5962 }
5963 }
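
/* Illustrative sketch (hypothetical): the V9 slot accounting above for
   aggregates.  Up to 8 bytes takes one slot, up to 16 takes two, and
   anything larger takes one slot for the reference pointer.  */
#if 0
static int
example_advance_v9_aggregate (int words, int size)
{
  if (size <= 8)
    return words + 1;
  if (size <= 16)
    return words + 2;
  return words + 1;                   /* passed by reference */
}
#endif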
5964
5965 /* Handle the FUNCTION_ARG_PADDING macro.
5966 For the 64 bit ABI structs are always stored left shifted in their
5967 argument slot. */
5968
5969 enum direction
5970 function_arg_padding (enum machine_mode mode, const_tree type)
5971 {
5972 if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5973 return upward;
5974
5975 /* Fall back to the default. */
5976 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5977 }
5978
5979 /* Handle the TARGET_RETURN_IN_MEMORY target hook.
5980 Specify whether to return the return value in memory. */
5981
5982 static bool
5983 sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5984 {
5985 if (TARGET_ARCH32)
5986 /* Original SPARC 32-bit ABI says that structures and unions,
5987 and quad-precision floats are returned in memory. All other
5988 base types are returned in registers.
5989
5990 Extended ABI (as implemented by the Sun compiler) says that
5991 all complex floats are returned in registers (8 FP registers
5992 at most for '_Complex long double'). Return all complex integers
5993 in registers (4 at most for '_Complex long long').
5994
5995 Vector ABI (as implemented by the Sun VIS SDK) says that vector
5996 integers are returned like floats of the same size, that is in
5997 registers up to 8 bytes and in memory otherwise. Return all
5998 vector floats in memory like structures and unions; note that
5999 they always have BLKmode like the latter. */
6000 return (TYPE_MODE (type) == BLKmode
6001 || TYPE_MODE (type) == TFmode
6002 || (TREE_CODE (type) == VECTOR_TYPE
6003 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
6004 else
6005 /* Original SPARC 64-bit ABI says that structures and unions
6006 smaller than 32 bytes are returned in registers, as well as
6007 all other base types.
6008
6009 Extended ABI (as implemented by the Sun compiler) says that all
6010 complex floats are returned in registers (8 FP registers at most
6011 for '_Complex long double'). Return all complex integers in
6012 registers (4 at most for '_Complex TItype').
6013
6014 Vector ABI (as implemented by the Sun VIS SDK) says that vector
6015 integers are returned like floats of the same size, that is in
6016 registers. Return all vector floats like structures and unions;
6017 note that they always have BLKmode like the latter. */
6018 return (TYPE_MODE (type) == BLKmode
6019 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32);
6020 }
6021
6022 /* Handle the TARGET_STRUCT_VALUE target hook.
6023 Return where to find the structure return value address. */
6024
6025 static rtx
6026 sparc_struct_value_rtx (tree fndecl, int incoming)
6027 {
6028 if (TARGET_ARCH64)
6029 return 0;
6030 else
6031 {
6032 rtx mem;
6033
6034 if (incoming)
6035 mem = gen_frame_mem (Pmode, plus_constant (frame_pointer_rtx,
6036 STRUCT_VALUE_OFFSET));
6037 else
6038 mem = gen_frame_mem (Pmode, plus_constant (stack_pointer_rtx,
6039 STRUCT_VALUE_OFFSET));
6040
6041 /* Only follow the SPARC ABI for fixed-size structure returns;
6042 variable-size structure returns are handled by the normal
6043 procedures in GCC. This is enabled by -mstd-struct-return. */
6044 if (incoming == 2
6045 && sparc_std_struct_return
6046 && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
6047 && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
6048 {
6049 /* We must check and adjust the return address, as it is
6050 optional as to whether the return object is really
6051 provided. */
6052 rtx ret_rtx = gen_rtx_REG (Pmode, 31);
6053 rtx scratch = gen_reg_rtx (SImode);
6054 rtx endlab = gen_label_rtx ();
6055
6056 /* Calculate the return object size.  */
6057 tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
6058 rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
6059 /* Construct a temporary return value.  */
6060 rtx temp_val
6061 = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
6062
6063 /* Implement SPARC 32-bit psABI callee return struct checking:
6064
6065 Fetch the instruction where we will return to and see if
6066 it's an unimp instruction (the most significant 10 bits
6067 will be zero). */
6068 emit_move_insn (scratch, gen_rtx_MEM (SImode,
6069 plus_constant (ret_rtx, 8)));
6070 /* Assume the size is valid and pre-adjust.  */
6071 emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6072 emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode,
6073 0, endlab);
6074 emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
6075 /* Write the address of the memory pointed to by temp_val into
6076 the memory pointed to by mem.  */
6077 emit_move_insn (mem, XEXP (temp_val, 0));
6078 emit_label (endlab);
6079 }
6080
6081 return mem;
6082 }
6083 }
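
/* Illustrative sketch (hypothetical, not in the source): the psABI
   check emitted above, as plain C.  A caller of a struct-returning
   function may place an "unimp <size>" word after the delay slot of
   the call; its most significant 10 bits are zero.  The callee loads
   the word at the return address + 8 and compares it with the expected
   size (low 12 bits only, as above).  */
#if 0
static int
example_unimp_matches (const unsigned int *ret_addr, unsigned int size)
{
  unsigned int insn = ret_addr[2];    /* word at return address + 8 */
  return insn == (size & 0xfff);      /* unimp insn encoding the size */
}
#endif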
6084
6085 /* Handle TARGET_FUNCTION_VALUE, and TARGET_LIBCALL_VALUE target hook.
6086 For v9, function return values are subject to the same rules as arguments,
6087 except that up to 32 bytes may be returned in registers. */
6088
6089 static rtx
6090 sparc_function_value_1 (const_tree type, enum machine_mode mode,
6091 bool outgoing)
6092 {
6093 /* Beware that the two values are swapped here wrt function_arg. */
6094 int regbase = (outgoing
6095 ? SPARC_INCOMING_INT_ARG_FIRST
6096 : SPARC_OUTGOING_INT_ARG_FIRST);
6097 enum mode_class mclass = GET_MODE_CLASS (mode);
6098 int regno;
6099
6100 /* Vector types deserve special treatment because they are polymorphic wrt
6101 their mode, depending upon whether VIS instructions are enabled. */
6102 if (type && TREE_CODE (type) == VECTOR_TYPE)
6103 {
6104 HOST_WIDE_INT size = int_size_in_bytes (type);
6105 gcc_assert ((TARGET_ARCH32 && size <= 8)
6106 || (TARGET_ARCH64 && size <= 32));
6107
6108 if (mode == BLKmode)
6109 return function_arg_vector_value (size,
6110 SPARC_FP_ARG_FIRST);
6111 else
6112 mclass = MODE_FLOAT;
6113 }
6114
6115 if (TARGET_ARCH64 && type)
6116 {
6117 /* Structures up to 32 bytes in size are returned in registers. */
6118 if (TREE_CODE (type) == RECORD_TYPE)
6119 {
6120 HOST_WIDE_INT size = int_size_in_bytes (type);
6121 gcc_assert (size <= 32);
6122
6123 return function_arg_record_value (type, mode, 0, 1, regbase);
6124 }
6125
6126 /* Unions up to 32 bytes in size are returned in integer registers. */
6127 else if (TREE_CODE (type) == UNION_TYPE)
6128 {
6129 HOST_WIDE_INT size = int_size_in_bytes (type);
6130 gcc_assert (size <= 32);
6131
6132 return function_arg_union_value (size, mode, 0, regbase);
6133 }
6134
6135 /* Objects that require it are returned in FP registers. */
6136 else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
6137 ;
6138
6139 /* All other aggregate types are returned in an integer register in a
6140 mode corresponding to the size of the type. */
6141 else if (AGGREGATE_TYPE_P (type))
6142 {
6145 HOST_WIDE_INT size = int_size_in_bytes (type);
6146 gcc_assert (size <= 32);
6147
6148 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6149
6150 /* ??? We probably should have made the same ABI change in
6151 3.4.0 as the one we made for unions. The latter was
6152 required by the SCD though, while the former is not
6153 specified, so we favored compatibility and efficiency.
6154
6155 Now we're stuck for aggregates larger than 16 bytes,
6156 because OImode vanished in the meantime. Let's not
6157 try to be unduly clever, and simply follow the ABI
6158 for unions in that case. */
6159 if (mode == BLKmode)
6160 return function_arg_union_value (size, mode, 0, regbase);
6161 else
6162 mclass = MODE_INT;
6163 }
6164
6165 /* We should only have pointer and integer types at this point. This
6166 must match sparc_promote_function_mode. */
6167 else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6168 mode = word_mode;
6169 }
6170
6171 /* We should only have pointer and integer types at this point. This must
6172 match sparc_promote_function_mode. */
6173 else if (TARGET_ARCH32
6174 && mclass == MODE_INT
6175 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
6176 mode = word_mode;
6177
6178 if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
6179 regno = SPARC_FP_ARG_FIRST;
6180 else
6181 regno = regbase;
6182
6183 return gen_rtx_REG (mode, regno);
6184 }
6185
6186 /* Handle TARGET_FUNCTION_VALUE.
6187 On the SPARC, the value is found in the first "output" register, but the
6188 called function leaves it in the first "input" register. */
6189
6190 static rtx
6191 sparc_function_value (const_tree valtype,
6192 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
6193 bool outgoing)
6194 {
6195 return sparc_function_value_1 (valtype, TYPE_MODE (valtype), outgoing);
6196 }
6197
6198 /* Handle TARGET_LIBCALL_VALUE. */
6199
6200 static rtx
6201 sparc_libcall_value (enum machine_mode mode,
6202 const_rtx fun ATTRIBUTE_UNUSED)
6203 {
6204 return sparc_function_value_1 (NULL_TREE, mode, false);
6205 }
6206
6207 /* Handle FUNCTION_VALUE_REGNO_P.
6208 On the SPARC, the first "output" reg is used for integer values, and the
6209 first floating point register is used for floating point values. */
6210
6211 static bool
6212 sparc_function_value_regno_p (const unsigned int regno)
6213 {
6214 return (regno == 8 || regno == 32);
6215 }
6216
6217 /* Do what is necessary for `va_start'. We look at the current function
6218 to determine if stdarg or varargs is used and return the address of
6219 the first unnamed parameter. */
6220
6221 static rtx
6222 sparc_builtin_saveregs (void)
6223 {
6224 int first_reg = crtl->args.info.words;
6225 rtx address;
6226 int regno;
6227
6228 for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
6229 emit_move_insn (gen_rtx_MEM (word_mode,
6230 gen_rtx_PLUS (Pmode,
6231 frame_pointer_rtx,
6232 GEN_INT (FIRST_PARM_OFFSET (0)
6233 + (UNITS_PER_WORD
6234 * regno)))),
6235 gen_rtx_REG (word_mode,
6236 SPARC_INCOMING_INT_ARG_FIRST + regno));
6237
6238 address = gen_rtx_PLUS (Pmode,
6239 frame_pointer_rtx,
6240 GEN_INT (FIRST_PARM_OFFSET (0)
6241 + UNITS_PER_WORD * first_reg));
6242
6243 return address;
6244 }
6245
6246 /* Implement `va_start' for stdarg. */
6247
6248 static void
6249 sparc_va_start (tree valist, rtx nextarg)
6250 {
6251 nextarg = expand_builtin_saveregs ();
6252 std_expand_builtin_va_start (valist, nextarg);
6253 }
6254
6255 /* Implement `va_arg' for stdarg. */
6256
6257 static tree
6258 sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6259 gimple_seq *post_p)
6260 {
6261 HOST_WIDE_INT size, rsize, align;
6262 tree addr, incr;
6263 bool indirect;
6264 tree ptrtype = build_pointer_type (type);
6265
6266 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
6267 {
6268 indirect = true;
6269 size = rsize = UNITS_PER_WORD;
6270 align = 0;
6271 }
6272 else
6273 {
6274 indirect = false;
6275 size = int_size_in_bytes (type);
6276 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6277 align = 0;
6278
6279 if (TARGET_ARCH64)
6280 {
6281 /* For SPARC64, objects requiring 16-byte alignment get it. */
6282 if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
6283 align = 2 * UNITS_PER_WORD;
6284
6285 /* SPARC-V9 ABI states that structures up to 16 bytes in size
6286 are left-justified in their slots. */
6287 if (AGGREGATE_TYPE_P (type))
6288 {
6289 if (size == 0)
6290 size = rsize = UNITS_PER_WORD;
6291 else
6292 size = rsize;
6293 }
6294 }
6295 }
6296
6297 incr = valist;
6298 if (align)
6299 {
6300 incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6301 size_int (align - 1));
6302 incr = fold_convert (sizetype, incr);
6303 incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
6304 size_int (-align));
6305 incr = fold_convert (ptr_type_node, incr);
6306 }
6307
6308 gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
6309 addr = incr;
6310
6311 if (BYTES_BIG_ENDIAN && size < rsize)
6312 addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
6313 size_int (rsize - size));
6314
6315 if (indirect)
6316 {
6317 addr = fold_convert (build_pointer_type (ptrtype), addr);
6318 addr = build_va_arg_indirect_ref (addr);
6319 }
6320
6321 /* If the address isn't aligned properly for the type, we need a temporary.
6322 FIXME: This is inefficient; usually we can do this in registers. */
6323 else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
6324 {
6325 tree tmp = create_tmp_var (type, "va_arg_tmp");
6326 tree dest_addr = build_fold_addr_expr (tmp);
6327 tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
6328 3, dest_addr, addr, size_int (rsize));
6329 TREE_ADDRESSABLE (tmp) = 1;
6330 gimplify_and_add (copy, pre_p);
6331 addr = dest_addr;
6332 }
6333
6334 else
6335 addr = fold_convert (ptrtype, addr);
6336
6337 incr
6338 = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
6339 gimplify_assign (valist, incr, post_p);
6340
6341 return build_va_arg_indirect_ref (addr);
6342 }
6343 \f
6344 /* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
6345 Specify whether the vector mode is supported by the hardware. */
6346
6347 static bool
6348 sparc_vector_mode_supported_p (enum machine_mode mode)
6349 {
6350 return TARGET_VIS && VECTOR_MODE_P (mode);
6351 }
6352 \f
6353 /* Implement the TARGET_VECTORIZE_PREFERRED_SIMD_MODE target hook. */
6354
6355 static enum machine_mode
6356 sparc_preferred_simd_mode (enum machine_mode mode)
6357 {
6358 if (TARGET_VIS)
6359 switch (mode)
6360 {
6361 case SImode:
6362 return V2SImode;
6363 case HImode:
6364 return V4HImode;
6365 case QImode:
6366 return V8QImode;
6367
6368 default:;
6369 }
6370
6371 return word_mode;
6372 }
6373 \f
6374 /* Return the string to output an unconditional branch to LABEL, which is
6375 the operand number of the label.
6376
6377 DEST is the destination insn (i.e. the label), INSN is the source. */
6378
6379 const char *
6380 output_ubranch (rtx dest, int label, rtx insn)
6381 {
6382 static char string[64];
6383 bool v9_form = false;
6384 char *p;
6385
6386 if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
6387 {
6388 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6389 - INSN_ADDRESSES (INSN_UID (insn)));
6390 /* Leave some instructions for "slop". */
6391 if (delta >= -260000 && delta < 260000)
6392 v9_form = true;
6393 }
6394
6395 if (v9_form)
6396 strcpy (string, "ba%*,pt\t%%xcc, ");
6397 else
6398 strcpy (string, "b%*\t");
6399
6400 p = strchr (string, '\0');
6401 *p++ = '%';
6402 *p++ = 'l';
6403 *p++ = '0' + label;
6404 *p++ = '%';
6405 *p++ = '(';
6406 *p = '\0';
6407
6408 return string;
6409 }
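
/* Illustrative sketch (hypothetical): the opcode selection above.  The
   V9 "ba,pt %xcc" form is used only when the target is within the
   +-260000 slop window, well inside the +-1MB reach of that form, so
   later insn length changes cannot push it out of range.  */
#if 0
static const char *
example_ubranch_opcode (int have_v9, long delta)
{
  if (have_v9 && delta >= -260000 && delta < 260000)
    return "ba%*,pt\t%%xcc, ";        /* V9 predicted form */
  return "b%*\t";                     /* V8 form */
}
#endif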
6410
6411 /* Return the string to output a conditional branch to LABEL, which is
6412 the operand number of the label. OP is the conditional expression.
6413 XEXP (OP, 0) is assumed to be a condition code register (integer or
6414 floating point) and its mode specifies what kind of comparison we made.
6415
6416 DEST is the destination insn (i.e. the label), INSN is the source.
6417
6418 REVERSED is nonzero if we should reverse the sense of the comparison.
6419
6420 ANNUL is nonzero if we should generate an annulling branch. */
6421
6422 const char *
6423 output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
6424 rtx insn)
6425 {
6426 static char string[64];
6427 enum rtx_code code = GET_CODE (op);
6428 rtx cc_reg = XEXP (op, 0);
6429 enum machine_mode mode = GET_MODE (cc_reg);
6430 const char *labelno, *branch;
6431 int spaces = 8, far;
6432 char *p;
6433
6434 /* v9 branches are limited to +-1MB. If it is too far away,
6435 change
6436
6437 bne,pt %xcc, .LC30
6438
6439 to
6440
6441 be,pn %xcc, .+12
6442 nop
6443 ba .LC30
6444
6445 and
6446
6447 fbne,a,pn %fcc2, .LC29
6448
6449 to
6450
6451 fbe,pt %fcc2, .+16
6452 nop
6453 ba .LC29 */
6454
6455 far = TARGET_V9 && (get_attr_length (insn) >= 3);
6456 if (reversed ^ far)
6457 {
6458 /* Reversal of FP compares takes care -- an ordered compare
6459 becomes an unordered compare and vice versa. */
6460 if (mode == CCFPmode || mode == CCFPEmode)
6461 code = reverse_condition_maybe_unordered (code);
6462 else
6463 code = reverse_condition (code);
6464 }
6465
6466 /* Start by writing the branch condition. */
6467 if (mode == CCFPmode || mode == CCFPEmode)
6468 {
6469 switch (code)
6470 {
6471 case NE:
6472 branch = "fbne";
6473 break;
6474 case EQ:
6475 branch = "fbe";
6476 break;
6477 case GE:
6478 branch = "fbge";
6479 break;
6480 case GT:
6481 branch = "fbg";
6482 break;
6483 case LE:
6484 branch = "fble";
6485 break;
6486 case LT:
6487 branch = "fbl";
6488 break;
6489 case UNORDERED:
6490 branch = "fbu";
6491 break;
6492 case ORDERED:
6493 branch = "fbo";
6494 break;
6495 case UNGT:
6496 branch = "fbug";
6497 break;
6498 case UNLT:
6499 branch = "fbul";
6500 break;
6501 case UNEQ:
6502 branch = "fbue";
6503 break;
6504 case UNGE:
6505 branch = "fbuge";
6506 break;
6507 case UNLE:
6508 branch = "fbule";
6509 break;
6510 case LTGT:
6511 branch = "fblg";
6512 break;
6513
6514 default:
6515 gcc_unreachable ();
6516 }
6517
6518 /* ??? !v9: FP branches cannot be preceded by another floating point
6519 insn. Because there is currently no concept of pre-delay slots,
6520 we can fix this only by always emitting a nop before a floating
6521 point branch. */
6522
6523 string[0] = '\0';
6524 if (! TARGET_V9)
6525 strcpy (string, "nop\n\t");
6526 strcat (string, branch);
6527 }
6528 else
6529 {
6530 switch (code)
6531 {
6532 case NE:
6533 branch = "bne";
6534 break;
6535 case EQ:
6536 branch = "be";
6537 break;
6538 case GE:
6539 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6540 branch = "bpos";
6541 else
6542 branch = "bge";
6543 break;
6544 case GT:
6545 branch = "bg";
6546 break;
6547 case LE:
6548 branch = "ble";
6549 break;
6550 case LT:
6551 if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6552 branch = "bneg";
6553 else
6554 branch = "bl";
6555 break;
6556 case GEU:
6557 branch = "bgeu";
6558 break;
6559 case GTU:
6560 branch = "bgu";
6561 break;
6562 case LEU:
6563 branch = "bleu";
6564 break;
6565 case LTU:
6566 branch = "blu";
6567 break;
6568
6569 default:
6570 gcc_unreachable ();
6571 }
6572 strcpy (string, branch);
6573 }
6574 spaces -= strlen (branch);
6575 p = strchr (string, '\0');
6576
6577 /* Now add the annulling, the label, and a possible noop. */
6578 if (annul && ! far)
6579 {
6580 strcpy (p, ",a");
6581 p += 2;
6582 spaces -= 2;
6583 }
6584
6585 if (TARGET_V9)
6586 {
6587 rtx note;
6588 int v8 = 0;
6589
6590 if (! far && insn && INSN_ADDRESSES_SET_P ())
6591 {
6592 int delta = (INSN_ADDRESSES (INSN_UID (dest))
6593 - INSN_ADDRESSES (INSN_UID (insn)));
6594 /* Leave some instructions for "slop". */
6595 if (delta < -260000 || delta >= 260000)
6596 v8 = 1;
6597 }
6598
6599 if (mode == CCFPmode || mode == CCFPEmode)
6600 {
6601 static char v9_fcc_labelno[] = "%%fccX, ";
6602 /* Set the char indicating the number of the fcc reg to use. */
6603 v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6604 labelno = v9_fcc_labelno;
6605 if (v8)
6606 {
6607 gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6608 labelno = "";
6609 }
6610 }
6611 else if (mode == CCXmode || mode == CCX_NOOVmode)
6612 {
6613 labelno = "%%xcc, ";
6614 gcc_assert (! v8);
6615 }
6616 else
6617 {
6618 labelno = "%%icc, ";
6619 if (v8)
6620 labelno = "";
6621 }
6622
6623 if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6624 {
6625 strcpy (p,
6626 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6627 ? ",pt" : ",pn");
6628 p += 3;
6629 spaces -= 3;
6630 }
6631 }
6632 else
6633 labelno = "";
6634
6635 if (spaces > 0)
6636 *p++ = '\t';
6637 else
6638 *p++ = ' ';
6639 strcpy (p, labelno);
6640 p = strchr (p, '\0');
6641 if (far)
6642 {
6643 strcpy (p, ".+12\n\t nop\n\tb\t");
6644 /* Skip the next insn if requested or
6645 if we know that it will be a nop. */
6646 if (annul || ! final_sequence)
6647 p[3] = '6';
6648 p += 14;
6649 }
6650 *p++ = '%';
6651 *p++ = 'l';
6652 *p++ = label + '0';
6653 *p++ = '%';
6654 *p++ = '#';
6655 *p = '\0';
6656
6657 return string;
6658 }
6659
6660 /* Emit a library call comparison between floating point X and Y.
6661 COMPARISON is the operator to compare with (EQ, NE, GT, etc).
6662 Return the new operator to be used in the comparison sequence.
6663
6664 TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6665 values as arguments instead of the TFmode registers themselves,
6666 that's why we cannot call emit_float_lib_cmp. */
6667
6668 rtx
6669 sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6670 {
6671 const char *qpfunc;
6672 rtx slot0, slot1, result, tem, tem2, libfunc;
6673 enum machine_mode mode;
6674 enum rtx_code new_comparison;
6675
6676 switch (comparison)
6677 {
6678 case EQ:
6679 qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
6680 break;
6681
6682 case NE:
6683 qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
6684 break;
6685
6686 case GT:
6687 qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
6688 break;
6689
6690 case GE:
6691 qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
6692 break;
6693
6694 case LT:
6695 qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
6696 break;
6697
6698 case LE:
6699 qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
6700 break;
6701
6702 case ORDERED:
6703 case UNORDERED:
6704 case UNGT:
6705 case UNLT:
6706 case UNEQ:
6707 case UNGE:
6708 case UNLE:
6709 case LTGT:
6710 qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
6711 break;
6712
6713 default:
6714 gcc_unreachable ();
6715 }
6716
6717 if (TARGET_ARCH64)
6718 {
6719 if (MEM_P (x))
6720 slot0 = x;
6721 else
6722 {
6723 slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6724 emit_move_insn (slot0, x);
6725 }
6726
6727 if (MEM_P (y))
6728 slot1 = y;
6729 else
6730 {
6731 slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
6732 emit_move_insn (slot1, y);
6733 }
6734
6735 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6736 emit_library_call (libfunc, LCT_NORMAL,
6737 DImode, 2,
6738 XEXP (slot0, 0), Pmode,
6739 XEXP (slot1, 0), Pmode);
6740 mode = DImode;
6741 }
6742 else
6743 {
6744 libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
6745 emit_library_call (libfunc, LCT_NORMAL,
6746 SImode, 2,
6747 x, TFmode, y, TFmode);
6748 mode = SImode;
6749 }
6750
6751
6752 /* Immediately move the result of the libcall into a pseudo
6753 register so reload doesn't clobber the value if it needs
6754 the return register for a spill reg. */
6755 result = gen_reg_rtx (mode);
6756 emit_move_insn (result, hard_libcall_value (mode, libfunc));
6757
6758 switch (comparison)
6759 {
6760 default:
6761 return gen_rtx_NE (VOIDmode, result, const0_rtx);
6762 case ORDERED:
6763 case UNORDERED:
6764 new_comparison = (comparison == UNORDERED ? EQ : NE);
6765 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
6766 case UNGT:
6767 case UNGE:
6768 new_comparison = (comparison == UNGT ? GT : NE);
6769 return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
6770 case UNLE:
6771 return gen_rtx_NE (VOIDmode, result, const2_rtx);
6772 case UNLT:
6773 tem = gen_reg_rtx (mode);
6774 if (TARGET_ARCH32)
6775 emit_insn (gen_andsi3 (tem, result, const1_rtx));
6776 else
6777 emit_insn (gen_anddi3 (tem, result, const1_rtx));
6778 return gen_rtx_NE (VOIDmode, tem, const0_rtx);
6779 case UNEQ:
6780 case LTGT:
6781 tem = gen_reg_rtx (mode);
6782 if (TARGET_ARCH32)
6783 emit_insn (gen_addsi3 (tem, result, const1_rtx));
6784 else
6785 emit_insn (gen_adddi3 (tem, result, const1_rtx));
6786 tem2 = gen_reg_rtx (mode);
6787 if (TARGET_ARCH32)
6788 emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6789 else
6790 emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6791 new_comparison = (comparison == UNEQ ? EQ : NE);
6792 return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
6793 }
6794
6795 gcc_unreachable ();
6796 }
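
/* Illustrative sketch (hypothetical): decoding the _Q_cmp/_Qp_cmp
   result used for the unordered comparisons above.  The libcall
   returns 0 for equal, 1 for less, 2 for greater and 3 for unordered,
   which the and/add tricks above reduce to single tests.  */
#if 0
static int
example_unlt (int result)
{
  return (result & 1) != 0;           /* less (1) or unordered (3) */
}

static int
example_uneq (int result)
{
  return ((result + 1) & 2) == 0;     /* equal (0) or unordered (3) */
}
#endif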
6797
6798 /* Generate an unsigned DImode to FP conversion. This is the same code
6799 optabs would emit if we didn't have TFmode patterns. */
6800
6801 void
6802 sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6803 {
6804 rtx neglab, donelab, i0, i1, f0, in, out;
6805
6806 out = operands[0];
6807 in = force_reg (DImode, operands[1]);
6808 neglab = gen_label_rtx ();
6809 donelab = gen_label_rtx ();
6810 i0 = gen_reg_rtx (DImode);
6811 i1 = gen_reg_rtx (DImode);
6812 f0 = gen_reg_rtx (mode);
6813
6814 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6815
6816 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6817 emit_jump_insn (gen_jump (donelab));
6818 emit_barrier ();
6819
6820 emit_label (neglab);
6821
6822 emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
6823 emit_insn (gen_anddi3 (i1, in, const1_rtx));
6824 emit_insn (gen_iordi3 (i0, i0, i1));
6825 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
6826 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
6827
6828 emit_label (donelab);
6829 }
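
/* Illustrative sketch (hypothetical): the sequence emitted above, as
   plain C.  A value with the high bit set is logically halved with its
   low bit folded back in (to keep rounding correct), converted, then
   doubled.  */
#if 0
static double
example_floatunsdi (unsigned long long in)
{
  double f;

  if ((long long) in >= 0)
    return (double) (long long) in;   /* plain signed conversion */

  f = (double) (long long) ((in >> 1) | (in & 1));  /* lshr, and, or */
  return f + f;                                     /* out = f0 + f0 */
}
#endif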
6830
6831 /* Generate an FP to unsigned DImode conversion. This is the same code
6832 optabs would emit if we didn't have TFmode patterns. */
6833
6834 void
6835 sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
6836 {
6837 rtx neglab, donelab, i0, i1, f0, in, out, limit;
6838
6839 out = operands[0];
6840 in = force_reg (mode, operands[1]);
6841 neglab = gen_label_rtx ();
6842 donelab = gen_label_rtx ();
6843 i0 = gen_reg_rtx (DImode);
6844 i1 = gen_reg_rtx (DImode);
6845 limit = gen_reg_rtx (mode);
6846 f0 = gen_reg_rtx (mode);
6847
6848 emit_move_insn (limit,
6849 CONST_DOUBLE_FROM_REAL_VALUE (
6850 REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
6851 emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);
6852
6853 emit_insn (gen_rtx_SET (VOIDmode,
6854 out,
6855 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
6856 emit_jump_insn (gen_jump (donelab));
6857 emit_barrier ();
6858
6859 emit_label (neglab);
6860
6861 emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
6862 emit_insn (gen_rtx_SET (VOIDmode,
6863 i0,
6864 gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
6865 emit_insn (gen_movdi (i1, const1_rtx));
6866 emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
6867 emit_insn (gen_xordi3 (out, i0, i1));
6868
6869 emit_label (donelab);
6870 }
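
/* Illustrative sketch (hypothetical): the sequence emitted above.
   Values below 2^63 convert directly; larger ones have 2^63 subtracted
   first and the top bit restored with an xor.  */
#if 0
static unsigned long long
example_fixunsdi (double in)
{
  const double limit = 9223372036854775808.0;       /* 2^63 */

  if (in < limit)
    return (unsigned long long) (long long) in;
  return (unsigned long long) (long long) (in - limit)
	 ^ (1ULL << 63);
}
#endif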
6871
6872 /* Return the string to output a conditional branch to LABEL, testing
6873 register REG. LABEL is the operand number of the label; REG is the
6874 operand number of the reg. OP is the conditional expression. The mode
6875 of REG says what kind of comparison we made.
6876
6877 DEST is the destination insn (i.e. the label), INSN is the source.
6878
6879 REVERSED is nonzero if we should reverse the sense of the comparison.
6880
6881 ANNUL is nonzero if we should generate an annulling branch. */
6882
6883 const char *
6884 output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
6885 int annul, rtx insn)
6886 {
6887 static char string[64];
6888 enum rtx_code code = GET_CODE (op);
6889 enum machine_mode mode = GET_MODE (XEXP (op, 0));
6890 rtx note;
6891 int far;
6892 char *p;
6893
6894 /* Branches on registers are limited to +-128KB. If the target is
6895 too far away, change
6896
6897 brnz,pt %g1, .LC30
6898
6899 to
6900
6901 brz,pn %g1, .+12
6902 nop
6903 ba,pt %xcc, .LC30
6904
6905 and
6906
6907 brgez,a,pn %o1, .LC29
6908
6909 to
6910
6911 brlz,pt %o1, .+16
6912 nop
6913 ba,pt %xcc, .LC29 */
6914
6915 far = get_attr_length (insn) >= 3;
6916
6917 /* These are integer branches on a register; we can always just reverse the code. */
6918 if (reversed ^ far)
6919 code = reverse_condition (code);
6920
6921 /* Only 64 bit versions of these instructions exist. */
6922 gcc_assert (mode == DImode);
6923
6924 /* Start by writing the branch condition. */
6925
6926 switch (code)
6927 {
6928 case NE:
6929 strcpy (string, "brnz");
6930 break;
6931
6932 case EQ:
6933 strcpy (string, "brz");
6934 break;
6935
6936 case GE:
6937 strcpy (string, "brgez");
6938 break;
6939
6940 case LT:
6941 strcpy (string, "brlz");
6942 break;
6943
6944 case LE:
6945 strcpy (string, "brlez");
6946 break;
6947
6948 case GT:
6949 strcpy (string, "brgz");
6950 break;
6951
6952 default:
6953 gcc_unreachable ();
6954 }
6955
6956 p = strchr (string, '\0');
6957
6958 /* Now add the annulling, reg, label, and nop. */
6959 if (annul && ! far)
6960 {
6961 strcpy (p, ",a");
6962 p += 2;
6963 }
6964
6965 if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6966 {
6967 strcpy (p,
6968 ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6969 ? ",pt" : ",pn");
6970 p += 3;
6971 }
6972
6973 *p = p < string + 8 ? '\t' : ' ';
6974 p++;
6975 *p++ = '%';
6976 *p++ = '0' + reg;
6977 *p++ = ',';
6978 *p++ = ' ';
6979 if (far)
6980 {
6981 int veryfar = 1, delta;
6982
6983 if (INSN_ADDRESSES_SET_P ())
6984 {
6985 delta = (INSN_ADDRESSES (INSN_UID (dest))
6986 - INSN_ADDRESSES (INSN_UID (insn)));
6987 /* Leave some instructions for "slop". */
6988 if (delta >= -260000 && delta < 260000)
6989 veryfar = 0;
6990 }
6991
6992 strcpy (p, ".+12\n\t nop\n\t");
6993 /* Skip the next insn if requested or
6994 if we know that it will be a nop. */
6995 if (annul || ! final_sequence)
6996 p[3] = '6';
6997 p += 12;
6998 if (veryfar)
6999 {
7000 strcpy (p, "b\t");
7001 p += 2;
7002 }
7003 else
7004 {
7005 strcpy (p, "ba,pt\t%%xcc, ");
7006 p += 13;
7007 }
7008 }
7009 *p++ = '%';
7010 *p++ = 'l';
7011 *p++ = '0' + label;
7012 *p++ = '%';
7013 *p++ = '#';
7014 *p = '\0';
7015
7016 return string;
7017 }
7018
7019 /* Return 1 if any of the registers of the instruction are %l[0-7] or
7020 %o[0-7]. Such instructions cannot be used in the delay slot of the
7021 return insn on V9. If TEST is 0, also rename all %i[0-7] registers
7022 to their %o[0-7] counterparts. */
7023
7024 static int
7025 epilogue_renumber (register rtx *where, int test)
7026 {
7027 register const char *fmt;
7028 register int i;
7029 register enum rtx_code code;
7030
7031 if (*where == 0)
7032 return 0;
7033
7034 code = GET_CODE (*where);
7035
7036 switch (code)
7037 {
7038 case REG:
7039 if (REGNO (*where) >= 8 && REGNO (*where) < 24) /* oX or lX */
7040 return 1;
7041 if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
7042 *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO (*where)));
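/* FALLTHROUGH */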
7043 case SCRATCH:
7044 case CC0:
7045 case PC:
7046 case CONST_INT:
7047 case CONST_DOUBLE:
7048 return 0;
7049
7050 /* Do not replace the frame pointer with the stack pointer because
7051 it can cause the delayed instruction to load below the stack.
7052 This occurs when instructions like:
7053
7054 (set (reg/i:SI 24 %i0)
7055 (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
7056 (const_int -20 [0xffffffec])) 0))
7057
7058 are in the return delayed slot. */
7059 case PLUS:
7060 if (GET_CODE (XEXP (*where, 0)) == REG
7061 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
7062 && (GET_CODE (XEXP (*where, 1)) != CONST_INT
7063 || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
7064 return 1;
7065 break;
7066
7067 case MEM:
7068 if (SPARC_STACK_BIAS
7069 && GET_CODE (XEXP (*where, 0)) == REG
7070 && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
7071 return 1;
7072 break;
7073
7074 default:
7075 break;
7076 }
7077
7078 fmt = GET_RTX_FORMAT (code);
7079
7080 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
7081 {
7082 if (fmt[i] == 'E')
7083 {
7084 register int j;
7085 for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
7086 if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
7087 return 1;
7088 }
7089 else if (fmt[i] == 'e'
7090 && epilogue_renumber (&(XEXP (*where, i)), test))
7091 return 1;
7092 }
7093 return 0;
7094 }
7095 \f
7096 /* Leaf functions and non-leaf functions have different needs. */
7097
7098 static const int
7099 reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
7100
7101 static const int
7102 reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
7103
7104 static const int *const reg_alloc_orders[] = {
7105 reg_leaf_alloc_order,
7106 reg_nonleaf_alloc_order};
7107
7108 void
7109 order_regs_for_local_alloc (void)
7110 {
7111 static int last_order_nonleaf = 1;
7112
7113 if (df_regs_ever_live_p (15) != last_order_nonleaf)
7114 {
7115 last_order_nonleaf = !last_order_nonleaf;
7116 memcpy ((char *) reg_alloc_order,
7117 (const char *) reg_alloc_orders[last_order_nonleaf],
7118 FIRST_PSEUDO_REGISTER * sizeof (int));
7119 }
7120 }
7121 \f
7122 /* Return 1 if REG and MEM are legitimate enough to allow the various
7123 mem<-->reg splits to be run. */
7124
7125 int
7126 sparc_splitdi_legitimate (rtx reg, rtx mem)
7127 {
7128 /* Punt if we are here by mistake. */
7129 gcc_assert (reload_completed);
7130
7131 /* We must have an offsettable memory reference. */
7132 if (! offsettable_memref_p (mem))
7133 return 0;
7134
7135 /* If we have legitimate args for ldd/std, we do not want
7136 the split to happen. */
7137 if ((REGNO (reg) % 2) == 0
7138 && mem_min_alignment (mem, 8))
7139 return 0;
7140
7141 /* Success. */
7142 return 1;
7143 }
7144
7145 /* Return 1 if x and y are some kind of REG and they refer to
7146 different hard registers. This test is guaranteed to be
7147 run after reload. */
7148
7149 int
7150 sparc_absnegfloat_split_legitimate (rtx x, rtx y)
7151 {
7152 if (GET_CODE (x) != REG)
7153 return 0;
7154 if (GET_CODE (y) != REG)
7155 return 0;
7156 if (REGNO (x) == REGNO (y))
7157 return 0;
7158 return 1;
7159 }
7160
7161 /* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
7162 This makes them candidates for using ldd and std insns.
7163
7164 Note reg1 and reg2 *must* be hard registers. */
7165
7166 int
7167 registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
7168 {
7169 /* We might have been passed a SUBREG. */
7170 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
7171 return 0;
7172
7173 if (REGNO (reg1) % 2 != 0)
7174 return 0;
7175
7176 /* Integer ldd is deprecated in SPARC V9.  */
7177 if (TARGET_V9 && REGNO (reg1) < 32)
7178 return 0;
7179
7180 return (REGNO (reg1) == REGNO (reg2) - 1);
7181 }
7182
7183 /* Return 1 if the addresses in mem1 and mem2 are suitable for use in
7184 an ldd or std insn.
7185
7186 This can only happen when addr1 and addr2, the addresses in mem1
7187 and mem2, are consecutive memory locations (addr1 + 4 == addr2).
7188 addr1 must also be aligned on a 64-bit boundary.
7189
7190 Also, if dependent_reg_rtx is not null, it should not be used to
7191 compute the address for mem1, i.e. we cannot optimize a sequence
7192 like:
7193 ld [%o0], %o0
7194 ld [%o0 + 4], %o1
7195 to
7196 ldd [%o0], %o0
7197 nor:
7198 ld [%g3 + 4], %g3
7199 ld [%g3], %g2
7200 to
7201 ldd [%g3], %g2
7202
7203 But, note that the transformation from:
7204 ld [%g2 + 4], %g3
7205 ld [%g2], %g2
7206 to
7207 ldd [%g2], %g2
7208 is perfectly fine. Thus, the peephole2 patterns always pass us
7209 the destination register of the first load, never the second one.
7210
7211 For stores we don't have a similar problem, so dependent_reg_rtx is
7212 NULL_RTX. */
7213
7214 int
7215 mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
7216 {
7217 rtx addr1, addr2;
7218 unsigned int reg1;
7219 HOST_WIDE_INT offset1;
7220
7221 /* The mems cannot be volatile. */
7222 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
7223 return 0;
7224
7225 /* MEM1 should be aligned on a 64-bit boundary. */
7226 if (MEM_ALIGN (mem1) < 64)
7227 return 0;
7228
7229 addr1 = XEXP (mem1, 0);
7230 addr2 = XEXP (mem2, 0);
7231
7232 /* Extract a register number and offset (if used) from the first addr. */
7233 if (GET_CODE (addr1) == PLUS)
7234 {
7235 /* If not a REG, return zero. */
7236 if (GET_CODE (XEXP (addr1, 0)) != REG)
7237 return 0;
7238 else
7239 {
7240 reg1 = REGNO (XEXP (addr1, 0));
7241 /* The offset must be constant! */
7242 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
7243 return 0;
7244 offset1 = INTVAL (XEXP (addr1, 1));
7245 }
7246 }
7247 else if (GET_CODE (addr1) != REG)
7248 return 0;
7249 else
7250 {
7251 reg1 = REGNO (addr1);
7252 /* This was a simple (mem (reg)) expression. Offset is 0. */
7253 offset1 = 0;
7254 }
7255
7256 /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
7257 if (GET_CODE (addr2) != PLUS)
7258 return 0;
7259
7260 if (GET_CODE (XEXP (addr2, 0)) != REG
7261 || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
7262 return 0;
7263
7264 if (reg1 != REGNO (XEXP (addr2, 0)))
7265 return 0;
7266
7267 if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
7268 return 0;
7269
7270 /* The first offset must be evenly divisible by 8 to ensure the
7271 address is 64 bit aligned. */
7272 if (offset1 % 8 != 0)
7273 return 0;
7274
7275 /* The offset for the second addr must be 4 more than the first addr. */
7276 if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
7277 return 0;
7278
7279 /* All the tests passed. addr1 and addr2 are valid for ldd and std
7280 instructions. */
7281 return 1;
7282 }
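
/* Illustrative sketch (hypothetical, plain integers for addresses):
   the address test above in one predicate.  Both mems must use the
   same base register, the first offset must be 8-byte aligned and the
   second must be exactly 4 greater.  */
#if 0
static int
example_addrs_ok_for_ldd (int reg1, long off1, int reg2, long off2)
{
  return reg1 == reg2 && off1 % 8 == 0 && off2 == off1 + 4;
}
#endif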
7283
7284 /* Return 1 if reg is a pseudo, or is the first register in
7285 a hard register pair. This makes it suitable for use in
7286 ldd and std insns. */
7287
7288 int
7289 register_ok_for_ldd (rtx reg)
7290 {
7291 /* We might have been passed a SUBREG. */
7292 if (!REG_P (reg))
7293 return 0;
7294
7295 if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
7296 return (REGNO (reg) % 2 == 0);
7297
7298 return 1;
7299 }
7300
7301 /* Return 1 if OP is a memory whose address is known to be
7302 aligned to an 8-byte boundary, or a pseudo during reload.
7303 This makes it suitable for use in ldd and std insns. */
7304
7305 int
7306 memory_ok_for_ldd (rtx op)
7307 {
7308 if (MEM_P (op))
7309 {
7310 /* In 64-bit mode, we assume that the address is word-aligned. */
7311 if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
7312 return 0;
7313
7314 if ((reload_in_progress || reload_completed)
7315 && !strict_memory_address_p (Pmode, XEXP (op, 0)))
7316 return 0;
7317 }
7318 else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
7319 {
7320 if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
7321 return 0;
7322 }
7323 else
7324 return 0;
7325
7326 return 1;
7327 }
7328 \f
7329 /* Print operand X (an rtx) in assembler syntax to file FILE.
7330 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
7331 For `%' followed by punctuation, CODE is the punctuation and X is null. */
7332
7333 void
7334 print_operand (FILE *file, rtx x, int code)
7335 {
7336 switch (code)
7337 {
7338 case '#':
7339 /* Output an insn in a delay slot. */
7340 if (final_sequence)
7341 sparc_indent_opcode = 1;
7342 else
7343 fputs ("\n\t nop", file);
7344 return;
7345 case '*':
7346 /* Output an annul flag if there's nothing for the delay slot and we
7347 are optimizing. This is always used with '(' below.
7348 Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
7349 this is a dbx bug. So, we only do this when optimizing.
7350 On UltraSPARC, a branch in a delay slot causes a pipeline flush.
7351 Always emit a nop in case the next instruction is a branch. */
7352 if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
7353 fputs (",a", file);
7354 return;
7355 case '(':
7356 /* Output a 'nop' if there's nothing for the delay slot and we are
7357 not optimizing. This is always used with '*' above. */
7358 if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
7359 fputs ("\n\t nop", file);
7360 else if (final_sequence)
7361 sparc_indent_opcode = 1;
7362 return;
7363 case ')':
7364 /* Output the right displacement from the saved PC on function return.
7365 The caller may have placed an "unimp" insn immediately after the call
7366 so we have to account for it. This insn is used in the 32-bit ABI
7367 when calling a function that returns a non-zero-sized structure. The
7368 64-bit ABI doesn't have it. Be careful to have this test be the same
7369 as that for the call. The exception is when sparc_std_struct_return
7370 is enabled, the psABI is followed exactly and the adjustment is made
7371 by the code in sparc_struct_value_rtx. The call emitted is the same
7372 when sparc_std_struct_return is enabled. */
7373 if (!TARGET_ARCH64
7374 && cfun->returns_struct
7375 && !sparc_std_struct_return
7376 && DECL_SIZE (DECL_RESULT (current_function_decl))
7377 && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
7378 == INTEGER_CST
7379 && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
7380 fputs ("12", file);
7381 else
7382 fputc ('8', file);
7383 return;
7384 case '_':
7385 /* Output the Embedded Medium/Anywhere code model base register. */
7386 fputs (EMBMEDANY_BASE_REG, file);
7387 return;
7388 case '&':
7389 /* Print some local dynamic TLS name. */
7390 assemble_name (file, get_some_local_dynamic_name ());
7391 return;
7392
7393 case 'Y':
7394 /* Adjust the operand to take into account a RESTORE operation. */
7395 if (GET_CODE (x) == CONST_INT)
7396 break;
7397 else if (GET_CODE (x) != REG)
7398 output_operand_lossage ("invalid %%Y operand");
7399 else if (REGNO (x) < 8)
7400 fputs (reg_names[REGNO (x)], file);
7401 else if (REGNO (x) >= 24 && REGNO (x) < 32)
7402 fputs (reg_names[REGNO (x)-16], file);
7403 else
7404 output_operand_lossage ("invalid %%Y operand");
7405 return;
7406 case 'L':
7407 /* Print out the low order register name of a register pair. */
7408 if (WORDS_BIG_ENDIAN)
7409 fputs (reg_names[REGNO (x)+1], file);
7410 else
7411 fputs (reg_names[REGNO (x)], file);
7412 return;
7413 case 'H':
7414 /* Print out the high order register name of a register pair. */
7415 if (WORDS_BIG_ENDIAN)
7416 fputs (reg_names[REGNO (x)], file);
7417 else
7418 fputs (reg_names[REGNO (x)+1], file);
7419 return;
7420 case 'R':
7421 /* Print out the second register name of a register pair or quad.
7422 I.e., R (%o0) => %o1. */
7423 fputs (reg_names[REGNO (x)+1], file);
7424 return;
7425 case 'S':
7426 /* Print out the third register name of a register quad.
7427 I.e., S (%o0) => %o2. */
7428 fputs (reg_names[REGNO (x)+2], file);
7429 return;
7430 case 'T':
7431 /* Print out the fourth register name of a register quad.
7432 I.e., T (%o0) => %o3. */
7433 fputs (reg_names[REGNO (x)+3], file);
7434 return;
7435 case 'x':
7436 /* Print a condition code register. */
7437 if (REGNO (x) == SPARC_ICC_REG)
7438 {
7439 /* We don't handle CC[X]_NOOVmode because they're not supposed
7440 to occur here. */
7441 if (GET_MODE (x) == CCmode)
7442 fputs ("%icc", file);
7443 else if (GET_MODE (x) == CCXmode)
7444 fputs ("%xcc", file);
7445 else
7446 gcc_unreachable ();
7447 }
7448 else
7449 /* %fccN register */
7450 fputs (reg_names[REGNO (x)], file);
7451 return;
7452 case 'm':
7453 /* Print the operand's address only. */
7454 output_address (XEXP (x, 0));
7455 return;
7456 case 'r':
7457 /* In this case we need a register. Use %g0 if the
7458 operand is const0_rtx. */
7459 if (x == const0_rtx
7460 || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
7461 {
7462 fputs ("%g0", file);
7463 return;
7464 }
7465 else
7466 break;
7467
7468 case 'A':
7469 switch (GET_CODE (x))
7470 {
7471 case IOR: fputs ("or", file); break;
7472 case AND: fputs ("and", file); break;
7473 case XOR: fputs ("xor", file); break;
7474 default: output_operand_lossage ("invalid %%A operand");
7475 }
7476 return;
7477
7478 case 'B':
7479 switch (GET_CODE (x))
7480 {
7481 case IOR: fputs ("orn", file); break;
7482 case AND: fputs ("andn", file); break;
7483 case XOR: fputs ("xnor", file); break;
7484 default: output_operand_lossage ("invalid %%B operand");
7485 }
7486 return;
7487
7488 /* These are used by the conditional move instructions. */
7489 case 'c' :
7490 case 'C':
7491 {
7492 enum rtx_code rc = GET_CODE (x);
7493
7494 if (code == 'c')
7495 {
7496 enum machine_mode mode = GET_MODE (XEXP (x, 0));
7497 if (mode == CCFPmode || mode == CCFPEmode)
7498 rc = reverse_condition_maybe_unordered (GET_CODE (x));
7499 else
7500 rc = reverse_condition (GET_CODE (x));
7501 }
7502 switch (rc)
7503 {
7504 case NE: fputs ("ne", file); break;
7505 case EQ: fputs ("e", file); break;
7506 case GE: fputs ("ge", file); break;
7507 case GT: fputs ("g", file); break;
7508 case LE: fputs ("le", file); break;
7509 case LT: fputs ("l", file); break;
7510 case GEU: fputs ("geu", file); break;
7511 case GTU: fputs ("gu", file); break;
7512 case LEU: fputs ("leu", file); break;
7513 case LTU: fputs ("lu", file); break;
7514 case LTGT: fputs ("lg", file); break;
7515 case UNORDERED: fputs ("u", file); break;
7516 case ORDERED: fputs ("o", file); break;
7517 case UNLT: fputs ("ul", file); break;
7518 case UNLE: fputs ("ule", file); break;
7519 case UNGT: fputs ("ug", file); break;
7520 case UNGE: fputs ("uge", file); break;
7521 case UNEQ: fputs ("ue", file); break;
7522 default: output_operand_lossage (code == 'c'
7523 ? "invalid %%c operand"
7524 : "invalid %%C operand");
7525 }
7526 return;
7527 }
7528
7529 /* These are used by the movr instruction pattern. */
7530 case 'd':
7531 case 'D':
7532 {
7533 enum rtx_code rc = (code == 'd'
7534 ? reverse_condition (GET_CODE (x))
7535 : GET_CODE (x));
7536 switch (rc)
7537 {
7538 case NE: fputs ("ne", file); break;
7539 case EQ: fputs ("e", file); break;
7540 case GE: fputs ("gez", file); break;
7541 case LT: fputs ("lz", file); break;
7542 case LE: fputs ("lez", file); break;
7543 case GT: fputs ("gz", file); break;
7544 default: output_operand_lossage (code == 'd'
7545 ? "invalid %%d operand"
7546 : "invalid %%D operand");
7547 }
7548 return;
7549 }
7550
7551 case 'b':
7552 {
7553 /* Print a sign-extended character. */
7554 int i = trunc_int_for_mode (INTVAL (x), QImode);
7555 fprintf (file, "%d", i);
7556 return;
7557 }
7558
7559 case 'f':
7560 /* Operand must be a MEM; write its address. */
7561 if (GET_CODE (x) != MEM)
7562 output_operand_lossage ("invalid %%f operand");
7563 output_address (XEXP (x, 0));
7564 return;
7565
7566 case 's':
7567 {
7568 /* Print a sign-extended 32-bit value. */
7569 HOST_WIDE_INT i;
7570 if (GET_CODE (x) == CONST_INT)
7571 i = INTVAL (x);
7572 else if (GET_CODE (x) == CONST_DOUBLE)
7573 i = CONST_DOUBLE_LOW (x);
7574 else
7575 {
7576 output_operand_lossage ("invalid %%s operand");
7577 return;
7578 }
7579 i = trunc_int_for_mode (i, SImode);
7580 fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
7581 return;
7582 }
7583
7584 case 0:
7585 /* Do nothing special. */
7586 break;
7587
7588 default:
7589 /* Undocumented flag. */
7590 output_operand_lossage ("invalid operand output code");
7591 }
7592
7593 if (GET_CODE (x) == REG)
7594 fputs (reg_names[REGNO (x)], file);
7595 else if (GET_CODE (x) == MEM)
7596 {
7597 fputc ('[', file);
7598 /* Poor Sun assembler doesn't understand absolute addressing. */
7599 if (CONSTANT_P (XEXP (x, 0)))
7600 fputs ("%g0+", file);
7601 output_address (XEXP (x, 0));
7602 fputc (']', file);
7603 }
7604 else if (GET_CODE (x) == HIGH)
7605 {
7606 fputs ("%hi(", file);
7607 output_addr_const (file, XEXP (x, 0));
7608 fputc (')', file);
7609 }
7610 else if (GET_CODE (x) == LO_SUM)
7611 {
7612 print_operand (file, XEXP (x, 0), 0);
7613 if (TARGET_CM_MEDMID)
7614 fputs ("+%l44(", file);
7615 else
7616 fputs ("+%lo(", file);
7617 output_addr_const (file, XEXP (x, 1));
7618 fputc (')', file);
7619 }
7620 else if (GET_CODE (x) == CONST_DOUBLE
7621 && (GET_MODE (x) == VOIDmode
7622 || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
7623 {
7624 if (CONST_DOUBLE_HIGH (x) == 0)
7625 fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
7626 else if (CONST_DOUBLE_HIGH (x) == -1
7627 && CONST_DOUBLE_LOW (x) < 0)
7628 fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
7629 else
7630 output_operand_lossage ("long long constant not a valid immediate operand");
7631 }
7632 else if (GET_CODE (x) == CONST_DOUBLE)
7633 output_operand_lossage ("floating point constant not a valid immediate operand");
7634 else
output_addr_const (file, x);
7635 }
7636 \f
7637 /* Target hook for assembling integer objects. The sparc version has
7638 special handling for aligned DI-mode objects. */
7639
7640 static bool
7641 sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
7642 {
7643 /* ??? We only output .xword's for symbols and only then in environments
7644 where the assembler can handle them. */
7645 if (aligned_p && size == 8
7646 && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
7647 {
7648 if (TARGET_V9)
7649 {
7650 assemble_integer_with_op ("\t.xword\t", x);
7651 return true;
7652 }
7653 else
7654 {
7655 assemble_aligned_integer (4, const0_rtx);
7656 assemble_aligned_integer (4, x);
7657 return true;
7658 }
7659 }
7660 return default_assemble_integer (x, size, aligned_p);
7661 }
7662 \f
7663 /* Return the value of a code used in the .proc pseudo-op that says
7664 what kind of result this function returns. For non-C types, we pick
7665 the closest C type. */
7666
7667 #ifndef SHORT_TYPE_SIZE
7668 #define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
7669 #endif
7670
7671 #ifndef INT_TYPE_SIZE
7672 #define INT_TYPE_SIZE BITS_PER_WORD
7673 #endif
7674
7675 #ifndef LONG_TYPE_SIZE
7676 #define LONG_TYPE_SIZE BITS_PER_WORD
7677 #endif
7678
7679 #ifndef LONG_LONG_TYPE_SIZE
7680 #define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
7681 #endif
7682
7683 #ifndef FLOAT_TYPE_SIZE
7684 #define FLOAT_TYPE_SIZE BITS_PER_WORD
7685 #endif
7686
7687 #ifndef DOUBLE_TYPE_SIZE
7688 #define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7689 #endif
7690
7691 #ifndef LONG_DOUBLE_TYPE_SIZE
7692 #define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
7693 #endif
7694
7695 unsigned long
7696 sparc_type_code (register tree type)
7697 {
7698 register unsigned long qualifiers = 0;
7699 register unsigned shift;
7700
7701 /* Only the first 30 bits of the qualifier are valid. We must refrain from
7702 setting more, since some assemblers will give an error for this. Also,
7703 we must be careful to avoid shifts of 32 bits or more to avoid getting
7704 unpredictable results. */
7705
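/* For example, a type of "int **" comes out as (1 << 6) | (1 << 8) | 4, i.e. 0x144: one pointer code at each of shifts 6 and 8, plus the base code for signed int. */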
7706 for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
7707 {
7708 switch (TREE_CODE (type))
7709 {
7710 case ERROR_MARK:
7711 return qualifiers;
7712
7713 case ARRAY_TYPE:
7714 qualifiers |= (3 << shift);
7715 break;
7716
7717 case FUNCTION_TYPE:
7718 case METHOD_TYPE:
7719 qualifiers |= (2 << shift);
7720 break;
7721
7722 case POINTER_TYPE:
7723 case REFERENCE_TYPE:
7724 case OFFSET_TYPE:
7725 qualifiers |= (1 << shift);
7726 break;
7727
7728 case RECORD_TYPE:
7729 return (qualifiers | 8);
7730
7731 case UNION_TYPE:
7732 case QUAL_UNION_TYPE:
7733 return (qualifiers | 9);
7734
7735 case ENUMERAL_TYPE:
7736 return (qualifiers | 10);
7737
7738 case VOID_TYPE:
7739 return (qualifiers | 16);
7740
7741 case INTEGER_TYPE:
7742 /* If this is a range type, consider it to be the underlying
7743 type. */
7744 if (TREE_TYPE (type) != 0)
7745 break;
7746
7747 /* Carefully distinguish all the standard types of C,
7748 without messing up if the language is not C. We do this by
7749 testing TYPE_PRECISION and TYPE_UNSIGNED. The old code used to
7750 look at both the names and the above fields, but that's redundant.
7751 Any type whose size is between two C types will be considered
7752 to be the wider of the two types. Also, we do not have a
7753 special code to use for "long long", so anything wider than
7754 long is treated the same. Note that we can't distinguish
7755 between "int" and "long" in this code if they are the same
7756 size, but that's fine, since neither can the assembler. */
7757
7758 if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
7759 return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));
7760
7761 else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
7762 return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));
7763
7764 else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
7765 return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));
7766
7767 else
7768 return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));
7769
7770 case REAL_TYPE:
7771 /* If this is a range type, consider it to be the underlying
7772 type. */
7773 if (TREE_TYPE (type) != 0)
7774 break;
7775
7776 /* Carefully distinguish all the standard types of C,
7777 without messing up if the language is not C. */
7778
7779 if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
7780 return (qualifiers | 6);
7781
7782 else
7783 return (qualifiers | 7);
7784
7785 case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
7786 /* ??? We need to distinguish between double and float complex types,
7787 but I don't know how yet because I can't reach this code from
7788 existing front-ends. */
7789 return (qualifiers | 7); /* Who knows? */
7790
7791 case VECTOR_TYPE:
7792 case BOOLEAN_TYPE: /* Boolean truth value type. */
7793 case LANG_TYPE:
7794 case NULLPTR_TYPE:
7795 return qualifiers;
7796
7797 default:
7798 gcc_unreachable (); /* Not a type! */
7799 }
7800 }
7801
7802 return qualifiers;
7803 }
7804 \f
7805 /* Nested function support. */
7806
7807 /* Emit RTL insns to initialize the variable parts of a trampoline.
7808 FNADDR is an RTX for the address of the function's pure code.
7809 CXT is an RTX for the static chain value for the function.
7810
7811 This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
7812 (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
7813 (to store insns). This is a bit excessive. Perhaps a different
7814 mechanism would be better here.
7815
7816 Emit enough FLUSH insns to synchronize the data and instruction caches. */
7817
7818 static void
7819 sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7820 {
7821 /* SPARC 32-bit trampoline:
7822
7823 sethi %hi(fn), %g1
7824 sethi %hi(static), %g2
7825 jmp %g1+%lo(fn)
7826 or %g2, %lo(static), %g2
7827
7828 SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
7829 JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
7830 */
7831
7832 emit_move_insn
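/* sethi %hi(fn), %g1: fold the top 22 bits of FNADDR into the 0x03000000 SETHI template. */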
7833 (adjust_address (m_tramp, SImode, 0),
7834 expand_binop (SImode, ior_optab,
7835 expand_shift (RSHIFT_EXPR, SImode, fnaddr,
7836 size_int (10), 0, 1),
7837 GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
7838 NULL_RTX, 1, OPTAB_DIRECT));
7839
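/* sethi %hi(static), %g2: likewise for the static chain CXT, with 0x05000000 selecting %g2 as the destination. */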
7840 emit_move_insn
7841 (adjust_address (m_tramp, SImode, 4),
7842 expand_binop (SImode, ior_optab,
7843 expand_shift (RSHIFT_EXPR, SImode, cxt,
7844 size_int (10), 0, 1),
7845 GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
7846 NULL_RTX, 1, OPTAB_DIRECT));
7847
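/* jmp %g1+%lo(fn): fold the low 10 bits of FNADDR into the 0x81c06000 JMPL template. */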
7848 emit_move_insn
7849 (adjust_address (m_tramp, SImode, 8),
7850 expand_binop (SImode, ior_optab,
7851 expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
7852 GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
7853 NULL_RTX, 1, OPTAB_DIRECT));
7854
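/* or %g2, %lo(static), %g2: fold the low 10 bits of CXT into the 0x8410a000 OR template. */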
7855 emit_move_insn
7856 (adjust_address (m_tramp, SImode, 12),
7857 expand_binop (SImode, ior_optab,
7858 expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
7859 GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
7860 NULL_RTX, 1, OPTAB_DIRECT));
7861
7862 /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
7863 aligned on a 16 byte boundary so one flush clears it all. */
7864 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
7865 if (sparc_cpu != PROCESSOR_ULTRASPARC
7866 && sparc_cpu != PROCESSOR_ULTRASPARC3
7867 && sparc_cpu != PROCESSOR_NIAGARA
7868 && sparc_cpu != PROCESSOR_NIAGARA2)
7869 emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));
7870
7871 /* Call __enable_execute_stack after writing onto the stack to make sure
7872 the stack address is accessible. */
7873 #ifdef ENABLE_EXECUTE_STACK
7874 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7875 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7876 #endif
7878 }
7879
7880 /* The 64-bit version is simpler because it makes more sense to load the
7881 values as "immediate" data out of the trampoline. It's also easier since
7882 we can read the PC without clobbering a register. */
7883
7884 static void
7885 sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
7886 {
7887 /* SPARC 64-bit trampoline:
7888
7889 rd %pc, %g1
7890 ldx [%g1+24], %g5
7891 jmp %g5
7892 ldx [%g1+16], %g5
7893 +16 bytes data
7894 */
7895
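/* The four constants below encode, in order, the four instructions shown above; the static chain and the function address then follow as immediate data at offsets 16 and 24. */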
7896 emit_move_insn (adjust_address (m_tramp, SImode, 0),
7897 GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
7898 emit_move_insn (adjust_address (m_tramp, SImode, 4),
7899 GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
7900 emit_move_insn (adjust_address (m_tramp, SImode, 8),
7901 GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
7902 emit_move_insn (adjust_address (m_tramp, SImode, 12),
7903 GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
7904 emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
7905 emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
7906 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));
7907
7908 if (sparc_cpu != PROCESSOR_ULTRASPARC
7909 && sparc_cpu != PROCESSOR_ULTRASPARC3
7910 && sparc_cpu != PROCESSOR_NIAGARA
7911 && sparc_cpu != PROCESSOR_NIAGARA2)
7912 emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));
7913
7914 /* Call __enable_execute_stack after writing onto the stack to make sure
7915 the stack address is accessible. */
7916 #ifdef ENABLE_EXECUTE_STACK
7917 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
7918 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
7919 #endif
7920 }
7921
7922 /* Worker for TARGET_TRAMPOLINE_INIT. */
7923
7924 static void
7925 sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
7926 {
7927 rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
7928 cxt = force_reg (Pmode, cxt);
7929 if (TARGET_ARCH64)
7930 sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
7931 else
7932 sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
7933 }
7934 \f
7935 /* Adjust the cost of a scheduling dependency. Return the new cost of
7936 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
7937
7938 static int
7939 supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
7940 {
7941 enum attr_type insn_type;
7942
7943 if (! recog_memoized (insn))
7944 return 0;
7945
7946 insn_type = get_attr_type (insn);
7947
7948 if (REG_NOTE_KIND (link) == 0)
7949 {
7950 /* Data dependency; DEP_INSN writes a register that INSN reads some
7951 cycles later. */
7952
7953 /* If a load, then the dependence must be on the memory address;
7954 add an extra "cycle". Note that the cost could be two cycles
7955 if the reg was written late in an instruction group; we cannot tell
7956 here. */
7957 if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
7958 return cost + 3;
7959
7960 /* Get the delay only if the address of the store is the dependence. */
7961 if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
7962 {
7963 rtx pat = PATTERN (insn);
7964 rtx dep_pat = PATTERN (dep_insn);
7965
7966 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
7967 return cost; /* This should not happen! */
7968
7969 /* The dependency between the two instructions was on the data that
7970 is being stored. Assume that this implies that the address of the
7971 store is not dependent. */
7972 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
7973 return cost;
7974
7975 return cost + 3; /* An approximation. */
7976 }
7977
7978 /* A shift instruction cannot receive its data from an instruction
7979 in the same cycle; add a one cycle penalty. */
7980 if (insn_type == TYPE_SHIFT)
7981 return cost + 3; /* Split before cascade into shift. */
7982 }
7983 else
7984 {
7985 /* Anti- or output- dependency; DEP_INSN reads/writes a register that
7986 INSN writes some cycles later. */
7987
7988 /* These are only significant for the fpu unit; writing a fp reg before
7989 the fpu has finished with it stalls the processor. */
7990
7991 /* Reusing an integer register causes no problems. */
7992 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
7993 return 0;
7994 }
7995
7996 return cost;
7997 }
7998
7999 static int
8000 hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
8001 {
8002 enum attr_type insn_type, dep_type;
8003 rtx pat = PATTERN (insn);
8004 rtx dep_pat = PATTERN (dep_insn);
8005
8006 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
8007 return cost;
8008
8009 insn_type = get_attr_type (insn);
8010 dep_type = get_attr_type (dep_insn);
8011
8012 switch (REG_NOTE_KIND (link))
8013 {
8014 case 0:
8015 /* Data dependency; DEP_INSN writes a register that INSN reads some
8016 cycles later. */
8017
8018 switch (insn_type)
8019 {
8020 case TYPE_STORE:
8021 case TYPE_FPSTORE:
8022 /* Get the delay iff the address of the store is the dependence. */
8023 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
8024 return cost;
8025
8026 if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
8027 return cost;
8028 return cost + 3;
8029
8030 case TYPE_LOAD:
8031 case TYPE_SLOAD:
8032 case TYPE_FPLOAD:
8033 /* If a load, then the dependence must be on the memory address. If
8034 the addresses aren't equal, then it might be a false dependency. */
8035 if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
8036 {
8037 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
8038 || GET_CODE (SET_DEST (dep_pat)) != MEM
8039 || GET_CODE (SET_SRC (pat)) != MEM
8040 || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
8041 XEXP (SET_SRC (pat), 0)))
8042 return cost + 2;
8043
8044 return cost + 8;
8045 }
8046 break;
8047
8048 case TYPE_BRANCH:
8049 /* Compare to branch latency is 0. There is no benefit from
8050 separating compare and branch. */
8051 if (dep_type == TYPE_COMPARE)
8052 return 0;
8053 /* Floating point compare to branch latency is less than
8054 compare to conditional move. */
8055 if (dep_type == TYPE_FPCMP)
8056 return cost - 1;
8057 break;
8058 default:
8059 break;
8060 }
8061 break;
8062
8063 case REG_DEP_ANTI:
8064 /* Anti-dependencies only penalize the fpu unit. */
8065 if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
8066 return 0;
8067 break;
8068
8069 default:
8070 break;
8071 }
8072
8073 return cost;
8074 }
8075
8076 static int
8077 sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
8078 {
8079 switch (sparc_cpu)
8080 {
8081 case PROCESSOR_SUPERSPARC:
8082 cost = supersparc_adjust_cost (insn, link, dep, cost);
8083 break;
8084 case PROCESSOR_HYPERSPARC:
8085 case PROCESSOR_SPARCLITE86X:
8086 cost = hypersparc_adjust_cost (insn, link, dep, cost);
8087 break;
8088 default:
8089 break;
8090 }
8091 return cost;
8092 }
8093
8094 static void
8095 sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
8096 int sched_verbose ATTRIBUTE_UNUSED,
8097 int max_ready ATTRIBUTE_UNUSED)
8098 {}
8099
8100 static int
8101 sparc_use_sched_lookahead (void)
8102 {
8103 if (sparc_cpu == PROCESSOR_NIAGARA
8104 || sparc_cpu == PROCESSOR_NIAGARA2)
8105 return 0;
8106 if (sparc_cpu == PROCESSOR_ULTRASPARC
8107 || sparc_cpu == PROCESSOR_ULTRASPARC3)
8108 return 4;
8109 if ((1 << sparc_cpu) &
8110 ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
8111 (1 << PROCESSOR_SPARCLITE86X)))
8112 return 3;
8113 return 0;
8114 }
8115
8116 static int
8117 sparc_issue_rate (void)
8118 {
8119 switch (sparc_cpu)
8120 {
8121 case PROCESSOR_NIAGARA:
8122 case PROCESSOR_NIAGARA2:
8123 default:
8124 return 1;
8125 case PROCESSOR_V9:
8126 /* Assume V9 processors are capable of at least dual-issue. */
8127 return 2;
8128 case PROCESSOR_SUPERSPARC:
8129 return 3;
8130 case PROCESSOR_HYPERSPARC:
8131 case PROCESSOR_SPARCLITE86X:
8132 return 2;
8133 case PROCESSOR_ULTRASPARC:
8134 case PROCESSOR_ULTRASPARC3:
8135 return 4;
8136 }
8137 }
8138
8139 static int
8140 set_extends (rtx insn)
8141 {
8142 register rtx pat = PATTERN (insn);
8143
8144 switch (GET_CODE (SET_SRC (pat)))
8145 {
8146 /* Load and some shift instructions zero extend. */
8147 case MEM:
8148 case ZERO_EXTEND:
8149 /* sethi clears the high bits. */
8150 case HIGH:
8151 /* LO_SUM is used with sethi; sethi clears the high
8152 bits and the values used with lo_sum are positive. */
8153 case LO_SUM:
8154 /* Store flag stores 0 or 1. */
8155 case LT: case LTU:
8156 case GT: case GTU:
8157 case LE: case LEU:
8158 case GE: case GEU:
8159 case EQ:
8160 case NE:
8161 return 1;
8162 case AND:
8163 {
8164 rtx op0 = XEXP (SET_SRC (pat), 0);
8165 rtx op1 = XEXP (SET_SRC (pat), 1);
8166 if (GET_CODE (op1) == CONST_INT)
8167 return INTVAL (op1) >= 0;
8168 if (GET_CODE (op0) != REG)
8169 return 0;
8170 if (sparc_check_64 (op0, insn) == 1)
8171 return 1;
8172 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8173 }
8174 case IOR:
8175 case XOR:
8176 {
8177 rtx op0 = XEXP (SET_SRC (pat), 0);
8178 rtx op1 = XEXP (SET_SRC (pat), 1);
8179 if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
8180 return 0;
8181 if (GET_CODE (op1) == CONST_INT)
8182 return INTVAL (op1) >= 0;
8183 return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
8184 }
8185 case LSHIFTRT:
8186 return GET_MODE (SET_SRC (pat)) == SImode;
8187 /* Positive integers leave the high bits zero. */
8188 case CONST_DOUBLE:
8189 return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
8190 case CONST_INT:
8191 return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
8192 case ASHIFTRT:
8193 case SIGN_EXTEND:
8194 return - (GET_MODE (SET_SRC (pat)) == SImode);
8195 case REG:
8196 return sparc_check_64 (SET_SRC (pat), insn);
8197 default:
8198 return 0;
8199 }
8200 }
8201
8202 /* We _ought_ to have only one kind per function, but... */
8203 static GTY(()) rtx sparc_addr_diff_list;
8204 static GTY(()) rtx sparc_addr_list;
8205
8206 void
8207 sparc_defer_case_vector (rtx lab, rtx vec, int diff)
8208 {
8209 vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
8210 if (diff)
8211 sparc_addr_diff_list
8212 = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
8213 else
8214 sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
8215 }
8216
8217 static void
8218 sparc_output_addr_vec (rtx vec)
8219 {
8220 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8221 int idx, vlen = XVECLEN (body, 0);
8222
8223 #ifdef ASM_OUTPUT_ADDR_VEC_START
8224 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8225 #endif
8226
8227 #ifdef ASM_OUTPUT_CASE_LABEL
8228 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8229 NEXT_INSN (lab));
8230 #else
8231 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8232 #endif
8233
8234 for (idx = 0; idx < vlen; idx++)
8235 {
8236 ASM_OUTPUT_ADDR_VEC_ELT
8237 (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
8238 }
8239
8240 #ifdef ASM_OUTPUT_ADDR_VEC_END
8241 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8242 #endif
8243 }
8244
8245 static void
8246 sparc_output_addr_diff_vec (rtx vec)
8247 {
8248 rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
8249 rtx base = XEXP (XEXP (body, 0), 0);
8250 int idx, vlen = XVECLEN (body, 1);
8251
8252 #ifdef ASM_OUTPUT_ADDR_VEC_START
8253 ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
8254 #endif
8255
8256 #ifdef ASM_OUTPUT_CASE_LABEL
8257 ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
8258 NEXT_INSN (lab));
8259 #else
8260 (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
8261 #endif
8262
8263 for (idx = 0; idx < vlen; idx++)
8264 {
8265 ASM_OUTPUT_ADDR_DIFF_ELT
8266 (asm_out_file,
8267 body,
8268 CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
8269 CODE_LABEL_NUMBER (base));
8270 }
8271
8272 #ifdef ASM_OUTPUT_ADDR_VEC_END
8273 ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
8274 #endif
8275 }
8276
8277 static void
8278 sparc_output_deferred_case_vectors (void)
8279 {
8280 rtx t;
8281 int align;
8282
8283 if (sparc_addr_list == NULL_RTX
8284 && sparc_addr_diff_list == NULL_RTX)
8285 return;
8286
8287 /* Align to cache line in the function's code section. */
8288 switch_to_section (current_function_section ());
8289
8290 align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
8291 if (align > 0)
8292 ASM_OUTPUT_ALIGN (asm_out_file, align);
8293
8294 for (t = sparc_addr_list; t ; t = XEXP (t, 1))
8295 sparc_output_addr_vec (XEXP (t, 0));
8296 for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
8297 sparc_output_addr_diff_vec (XEXP (t, 0));
8298
8299 sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
8300 }
8301
8302 /* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
8303 unknown. Return 1 if the high bits are zero, -1 if the register is
8304 sign extended. */
8305 int
8306 sparc_check_64 (rtx x, rtx insn)
8307 {
8308 /* If a register is set only once it is safe to ignore insns this
8309 code does not know how to handle. The loop will either recognize
8310 the single set and return the correct value or fail to recognize
8311 it and return 0. */
8312 int set_once = 0;
8313 rtx y = x;
8314
8315 gcc_assert (GET_CODE (x) == REG);
8316
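/* For a DImode register, track its low-order SImode half; on a big-endian target that is the second register of the pair. */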
8317 if (GET_MODE (x) == DImode)
8318 y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);
8319
8320 if (flag_expensive_optimizations
8321 && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
8322 set_once = 1;
8323
8324 if (insn == 0)
8325 {
8326 if (set_once)
8327 insn = get_last_insn_anywhere ();
8328 else
8329 return 0;
8330 }
8331
8332 while ((insn = PREV_INSN (insn)))
8333 {
8334 switch (GET_CODE (insn))
8335 {
8336 case JUMP_INSN:
8337 case NOTE:
8338 break;
8339 case CODE_LABEL:
8340 case CALL_INSN:
8341 default:
8342 if (! set_once)
8343 return 0;
8344 break;
8345 case INSN:
8346 {
8347 rtx pat = PATTERN (insn);
8348 if (GET_CODE (pat) != SET)
8349 return 0;
8350 if (rtx_equal_p (x, SET_DEST (pat)))
8351 return set_extends (insn);
8352 if (y && rtx_equal_p (y, SET_DEST (pat)))
8353 return set_extends (insn);
8354 if (reg_overlap_mentioned_p (SET_DEST (pat), y))
8355 return 0;
8356 }
8357 }
8358 }
8359 return 0;
8360 }
8361
8362 /* Returns assembly code to perform a DImode shift using
8363 a 64-bit global or out register on SPARC-V8+. */
8364 const char *
8365 output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
8366 {
8367 static char asm_code[60];
8368
8369 /* The scratch register is only required when the destination
8370 register is not a 64-bit global or out register. */
8371 if (which_alternative != 2)
8372 operands[3] = operands[0];
8373
8374 /* We can only shift by constants <= 63. */
8375 if (GET_CODE (operands[2]) == CONST_INT)
8376 operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
8377
8378 if (GET_CODE (operands[1]) == CONST_INT)
8379 {
8380 output_asm_insn ("mov\t%1, %3", operands);
8381 }
8382 else
8383 {
8384 output_asm_insn ("sllx\t%H1, 32, %3", operands);
8385 if (sparc_check_64 (operands[1], insn) <= 0)
8386 output_asm_insn ("srl\t%L1, 0, %L1", operands);
8387 output_asm_insn ("or\t%L1, %3, %3", operands);
8388 }
8389
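/* Emit the shift on the 64-bit scratch or destination register, then split the 64-bit result back into the %H0/%L0 register pair. */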
8390 strcpy (asm_code, opcode);
8391
8392 if (which_alternative != 2)
8393 return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
8394 else
8395 return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
8396 }
8397 \f
8398 /* Output rtl to increment the profiler label LABELNO
8399 for profiling a function entry. */
8400
8401 void
8402 sparc_profile_hook (int labelno)
8403 {
8404 char buf[32];
8405 rtx lab, fun;
8406
8407 fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
8408 if (NO_PROFILE_COUNTERS)
8409 {
8410 emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
8411 }
8412 else
8413 {
8414 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8415 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
8416 emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
8417 }
8418 }
8419 \f
8420 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
8421
8422 static void
8423 sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
8424 tree decl ATTRIBUTE_UNUSED)
8425 {
8426 fprintf (asm_out_file, "\t.section\t\"%s\"", name);
8427
8428 if (!(flags & SECTION_DEBUG))
8429 fputs (",#alloc", asm_out_file);
8430 if (flags & SECTION_WRITE)
8431 fputs (",#write", asm_out_file);
8432 if (flags & SECTION_TLS)
8433 fputs (",#tls", asm_out_file);
8434 if (flags & SECTION_CODE)
8435 fputs (",#execinstr", asm_out_file);
8436
8437 /* ??? Handle SECTION_BSS. */
8438
8439 fputc ('\n', asm_out_file);
8440 }
8441
8442 /* We do not allow indirect calls to be optimized into sibling calls.
8443
8444 We cannot use sibling calls when delayed branches are disabled
8445 because they will likely require the call delay slot to be filled.
8446
8447 Also, on SPARC 32-bit we cannot emit a sibling call when the
8448 current function returns a structure. This is because the "unimp
8449 after call" convention would cause the callee to return to the
8450 wrong place. The generic code already disallows cases where the
8451 function being called returns a structure.
8452
8453 It may seem strange how this last case could occur. Usually there
8454 is code after the call which jumps to epilogue code which dumps the
8455 return value into the struct return area. That ought to invalidate
8456 the sibling call, right? Well, in the C++ case we can end up passing
8457 the pointer to the struct return area to a constructor (which returns
8458 void) and then nothing else happens. Such a sibling call would look
8459 valid without the added check here.
8460
8461 VxWorks PIC PLT entries require the global pointer to be initialized
8462 on entry. We therefore can't emit sibling calls to them. */
8463 static bool
8464 sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8465 {
8466 return (decl
8467 && flag_delayed_branch
8468 && (TARGET_ARCH64 || ! cfun->returns_struct)
8469 && !(TARGET_VXWORKS_RTP
8470 && flag_pic
8471 && !targetm.binds_local_p (decl)));
8472 }
8473 \f
8474 /* libfunc renaming. */
8475
8476 static void
8477 sparc_init_libfuncs (void)
8478 {
8479 if (TARGET_ARCH32)
8480 {
8481 /* Use the subroutines that Sun's library provides for integer
8482 multiply and divide. The `*' prevents an underscore from
8483 being prepended by the compiler. .umul is a little faster
8484 than .mul. */
8485 set_optab_libfunc (smul_optab, SImode, "*.umul");
8486 set_optab_libfunc (sdiv_optab, SImode, "*.div");
8487 set_optab_libfunc (udiv_optab, SImode, "*.udiv");
8488 set_optab_libfunc (smod_optab, SImode, "*.rem");
8489 set_optab_libfunc (umod_optab, SImode, "*.urem");
8490
8491 /* TFmode arithmetic. These names are part of the SPARC 32-bit ABI. */
8492 set_optab_libfunc (add_optab, TFmode, "_Q_add");
8493 set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
8494 set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
8495 set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
8496 set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");
8497
8498 /* We can define the TFmode sqrt optab only if TARGET_FPU. This
8499 is because with soft-float, the SFmode and DFmode sqrt
8500 instructions will be absent, and the compiler will notice and
8501 try to use the TFmode sqrt instruction for calls to the
8502 builtin function sqrt, but this fails. */
8503 if (TARGET_FPU)
8504 set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");
8505
8506 set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
8507 set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
8508 set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
8509 set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
8510 set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
8511 set_optab_libfunc (le_optab, TFmode, "_Q_fle");
8512
8513 set_conv_libfunc (sext_optab, TFmode, SFmode, "_Q_stoq");
8514 set_conv_libfunc (sext_optab, TFmode, DFmode, "_Q_dtoq");
8515 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_Q_qtos");
8516 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_Q_qtod");
8517
8518 set_conv_libfunc (sfix_optab, SImode, TFmode, "_Q_qtoi");
8519 set_conv_libfunc (ufix_optab, SImode, TFmode, "_Q_qtou");
8520 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
8521 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");
8522
8523 if (DITF_CONVERSION_LIBFUNCS)
8524 {
8525 set_conv_libfunc (sfix_optab, DImode, TFmode, "_Q_qtoll");
8526 set_conv_libfunc (ufix_optab, DImode, TFmode, "_Q_qtoull");
8527 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
8528 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
8529 }
8530
8531 if (SUN_CONVERSION_LIBFUNCS)
8532 {
8533 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
8534 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
8535 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
8536 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
8537 }
8538 }
8539 if (TARGET_ARCH64)
8540 {
8541 /* In the SPARC 64-bit ABI, SImode multiply and divide functions
8542 do not exist in the library. Make sure the compiler does not
8543 emit calls to them by accident. (It should always use the
8544 hardware instructions.) */
8545 set_optab_libfunc (smul_optab, SImode, 0);
8546 set_optab_libfunc (sdiv_optab, SImode, 0);
8547 set_optab_libfunc (udiv_optab, SImode, 0);
8548 set_optab_libfunc (smod_optab, SImode, 0);
8549 set_optab_libfunc (umod_optab, SImode, 0);
8550
8551 if (SUN_INTEGER_MULTIPLY_64)
8552 {
8553 set_optab_libfunc (smul_optab, DImode, "__mul64");
8554 set_optab_libfunc (sdiv_optab, DImode, "__div64");
8555 set_optab_libfunc (udiv_optab, DImode, "__udiv64");
8556 set_optab_libfunc (smod_optab, DImode, "__rem64");
8557 set_optab_libfunc (umod_optab, DImode, "__urem64");
8558 }
8559
8560 if (SUN_CONVERSION_LIBFUNCS)
8561 {
8562 set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
8563 set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
8564 set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
8565 set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
8566 }
8567 }
8568 }
8569 \f
8570 #define def_builtin(NAME, CODE, TYPE) \
8571 add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
8572 NULL_TREE)
8573
8574 /* Implement the TARGET_INIT_BUILTINS target hook.
8575 Create builtin functions for special SPARC instructions. */
8576
8577 static void
8578 sparc_init_builtins (void)
8579 {
8580 if (TARGET_VIS)
8581 sparc_vis_init_builtins ();
8582 }
8583
8584 /* Create builtin functions for VIS 1.0 instructions. */
8585
8586 static void
8587 sparc_vis_init_builtins (void)
8588 {
8589 tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
8590 tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
8591 tree v4hi = build_vector_type (intHI_type_node, 4);
8592 tree v2hi = build_vector_type (intHI_type_node, 2);
8593 tree v2si = build_vector_type (intSI_type_node, 2);
8594
8595 tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
8596 tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
8597 tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
8598 tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
8599 tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
8600 tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
8601 tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
8602 tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
8603 tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
8604 tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
8605 tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
8606 tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
8607 tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
8608 v8qi, v8qi,
8609 intDI_type_node, 0);
8610 tree di_ftype_di_di = build_function_type_list (intDI_type_node,
8611 intDI_type_node,
8612 intDI_type_node, 0);
8613 tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
8614 ptr_type_node,
8615 intSI_type_node, 0);
8616 tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
8617 ptr_type_node,
8618 intDI_type_node, 0);
8619
8620 /* Packing and expanding vectors. */
8621 def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
8622 def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
8623 v8qi_ftype_v2si_v8qi);
8624 def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
8625 v2hi_ftype_v2si);
8626 def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
8627 def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
8628 v8qi_ftype_v4qi_v4qi);
8629
8630 /* Multiplications. */
8631 def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
8632 v4hi_ftype_v4qi_v4hi);
8633 def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
8634 v4hi_ftype_v4qi_v2hi);
8635 def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
8636 v4hi_ftype_v4qi_v2hi);
8637 def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
8638 v4hi_ftype_v8qi_v4hi);
8639 def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
8640 v4hi_ftype_v8qi_v4hi);
8641 def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
8642 v2si_ftype_v4qi_v2hi);
8643 def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
8644 v2si_ftype_v4qi_v2hi);
8645
8646 /* Data aligning. */
8647 def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
8648 v4hi_ftype_v4hi_v4hi);
8649 def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
8650 v8qi_ftype_v8qi_v8qi);
8651 def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
8652 v2si_ftype_v2si_v2si);
8653 def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
8654 di_ftype_di_di);
8655 if (TARGET_ARCH64)
8656 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
8657 ptr_ftype_ptr_di);
8658 else
8659 def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
8660 ptr_ftype_ptr_si);
8661
8662 /* Pixel distance. */
8663 def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
8664 di_ftype_v8qi_v8qi_di);
8665 }
8666
8667 /* Handle TARGET_EXPAND_BUILTIN target hook.
8668 Expand builtin functions for sparc intrinsics. */
8669
8670 static rtx
8671 sparc_expand_builtin (tree exp, rtx target,
8672 rtx subtarget ATTRIBUTE_UNUSED,
8673 enum machine_mode tmode ATTRIBUTE_UNUSED,
8674 int ignore ATTRIBUTE_UNUSED)
8675 {
8676 tree arg;
8677 call_expr_arg_iterator iter;
8678 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
8679 unsigned int icode = DECL_FUNCTION_CODE (fndecl);
8680 rtx pat, op[4];
8681 enum machine_mode mode[4];
8682 int arg_count = 0;
8683
8684 mode[0] = insn_data[icode].operand[0].mode;
8685 if (!target
8686 || GET_MODE (target) != mode[0]
8687 || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
8688 op[0] = gen_reg_rtx (mode[0]);
8689 else
8690 op[0] = target;
8691
8692 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
8693 {
8694 arg_count++;
8695 mode[arg_count] = insn_data[icode].operand[arg_count].mode;
8696 op[arg_count] = expand_normal (arg);
8697
8698 if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
8699 mode[arg_count]))
8700 op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
8701 }
8702
8703 switch (arg_count)
8704 {
8705 case 1:
8706 pat = GEN_FCN (icode) (op[0], op[1]);
8707 break;
8708 case 2:
8709 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
8710 break;
8711 case 3:
8712 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
8713 break;
8714 default:
8715 gcc_unreachable ();
8716 }
8717
8718 if (!pat)
8719 return NULL_RTX;
8720
8721 emit_insn (pat);
8722
8723 return op[0];
8724 }
8725
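/* Subroutine of sparc_handle_vis_mul8x16: multiply one 8-bit and one 16-bit element and scale the product by 1/256 with rounding, mirroring what the fmul8x16 family of instructions computes per element. */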
8726 static int
8727 sparc_vis_mul8x16 (int e8, int e16)
8728 {
8729 return (e8 * e16 + 128) / 256;
8730 }
8731
8732 /* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
8733 by FNCODE. All of the elements in ELTS0 and ELTS1 lists must be integer
8734 constants. A tree list with the results of the multiplications is returned,
8735 and each element in the list is of INNER_TYPE. */
8736
8737 static tree
8738 sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
8739 {
8740 tree n_elts = NULL_TREE;
8741 int scale;
8742
8743 switch (fncode)
8744 {
8745 case CODE_FOR_fmul8x16_vis:
8746 for (; elts0 && elts1;
8747 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8748 {
8749 int val
8750 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8751 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
8752 n_elts = tree_cons (NULL_TREE,
8753 build_int_cst (inner_type, val),
8754 n_elts);
8755 }
8756 break;
8757
8758 case CODE_FOR_fmul8x16au_vis:
8759 scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8760
8761 for (; elts0; elts0 = TREE_CHAIN (elts0))
8762 {
8763 int val
8764 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8765 scale);
8766 n_elts = tree_cons (NULL_TREE,
8767 build_int_cst (inner_type, val),
8768 n_elts);
8769 }
8770 break;
8771
8772 case CODE_FOR_fmul8x16al_vis:
8773 scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));
8774
8775 for (; elts0; elts0 = TREE_CHAIN (elts0))
8776 {
8777 int val
8778 = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8779 scale);
8780 n_elts = tree_cons (NULL_TREE,
8781 build_int_cst (inner_type, val),
8782 n_elts);
8783 }
8784 break;
8785
8786 default:
8787 gcc_unreachable ();
8788 }
8789
8790 return nreverse (n_elts);
8791 }
8792 
8793 /* Handle TARGET_FOLD_BUILTIN target hook.
8794 Fold builtin functions for SPARC intrinsics. If IGNORE is true the
8795 result of the function call is ignored. NULL_TREE is returned if the
8796 function could not be folded. */
8797
8798 static tree
8799 sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
8800 tree *args, bool ignore)
8801 {
8802 tree arg0, arg1, arg2;
8803 tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
8804 enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);
8805
8806 if (ignore
8807 && icode != CODE_FOR_alignaddrsi_vis
8808 && icode != CODE_FOR_alignaddrdi_vis)
8809 return build_zero_cst (rtype);
8810
8811 switch (icode)
8812 {
8813 case CODE_FOR_fexpand_vis:
8814 arg0 = args[0];
8815 STRIP_NOPS (arg0);
8816
8817 if (TREE_CODE (arg0) == VECTOR_CST)
8818 {
8819 tree inner_type = TREE_TYPE (rtype);
8820 tree elts = TREE_VECTOR_CST_ELTS (arg0);
8821 tree n_elts = NULL_TREE;
8822
8823 for (; elts; elts = TREE_CHAIN (elts))
8824 {
8825 unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
8826 n_elts = tree_cons (NULL_TREE,
8827 build_int_cst (inner_type, val),
8828 n_elts);
8829 }
8830 return build_vector (rtype, nreverse (n_elts));
8831 }
8832 break;
8833
8834 case CODE_FOR_fmul8x16_vis:
8835 case CODE_FOR_fmul8x16au_vis:
8836 case CODE_FOR_fmul8x16al_vis:
8837 arg0 = args[0];
8838 arg1 = args[1];
8839 STRIP_NOPS (arg0);
8840 STRIP_NOPS (arg1);
8841
8842 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8843 {
8844 tree inner_type = TREE_TYPE (rtype);
8845 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8846 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8847 tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
8848 elts1);
8849
8850 return build_vector (rtype, n_elts);
8851 }
8852 break;
8853
8854 case CODE_FOR_fpmerge_vis:
8855 arg0 = args[0];
8856 arg1 = args[1];
8857 STRIP_NOPS (arg0);
8858 STRIP_NOPS (arg1);
8859
8860 if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
8861 {
8862 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8863 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8864 tree n_elts = NULL_TREE;
8865
8866 for (; elts0 && elts1;
8867 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8868 {
8869 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
8870 n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
8871 }
8872
8873 return build_vector (rtype, nreverse (n_elts));
8874 }
8875 break;
8876
8877 case CODE_FOR_pdist_vis:
8878 arg0 = args[0];
8879 arg1 = args[1];
8880 arg2 = args[2];
8881 STRIP_NOPS (arg0);
8882 STRIP_NOPS (arg1);
8883 STRIP_NOPS (arg2);
8884
8885 if (TREE_CODE (arg0) == VECTOR_CST
8886 && TREE_CODE (arg1) == VECTOR_CST
8887 && TREE_CODE (arg2) == INTEGER_CST)
8888 {
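/* pdist sums the absolute differences of the eight byte elements into the 64-bit accumulator passed as the third argument; compute that sum here with double-word arithmetic. */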
8889 int overflow = 0;
8890 unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
8891 HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
8892 tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
8893 tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
8894
8895 for (; elts0 && elts1;
8896 elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
8897 {
8898 unsigned HOST_WIDE_INT
8899 low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
8900 low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
8901 HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
8902 HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));
8903
8904 unsigned HOST_WIDE_INT l;
8905 HOST_WIDE_INT h;
8906
8907 overflow |= neg_double (low1, high1, &l, &h);
8908 overflow |= add_double (low0, high0, l, h, &l, &h);
8909 if (h < 0)
8910 overflow |= neg_double (l, h, &l, &h);
8911
8912 overflow |= add_double (low, high, l, h, &low, &high);
8913 }
8914
8915 gcc_assert (overflow == 0);
8916
8917 return build_int_cst_wide (rtype, low, high);
8918 }
8919
8920 default:
8921 break;
8922 }
8923
8924 return NULL_TREE;
8925 }
8926 \f
8927 /* ??? This duplicates information provided to the compiler by the
8928 ??? scheduler description. Some day, teach genautomata to output
8929 ??? the latencies and then CSE will just use that. */
8930
8931 static bool
8932 sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
8933 bool speed ATTRIBUTE_UNUSED)
8934 {
8935 enum machine_mode mode = GET_MODE (x);
8936 bool float_mode_p = FLOAT_MODE_P (mode);
8937
8938 switch (code)
8939 {
8940 case CONST_INT:
8941 if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
8942 {
8943 *total = 0;
8944 return true;
8945 }
8946 /* FALLTHRU */
8947
8948 case HIGH:
8949 *total = 2;
8950 return true;
8951
8952 case CONST:
8953 case LABEL_REF:
8954 case SYMBOL_REF:
8955 *total = 4;
8956 return true;
8957
8958 case CONST_DOUBLE:
8959 if (GET_MODE (x) == VOIDmode
8960 && ((CONST_DOUBLE_HIGH (x) == 0
8961 && CONST_DOUBLE_LOW (x) < 0x1000)
8962 || (CONST_DOUBLE_HIGH (x) == -1
8963 && CONST_DOUBLE_LOW (x) < 0
8964 && CONST_DOUBLE_LOW (x) >= -0x1000)))
8965 *total = 0;
8966 else
8967 *total = 8;
8968 return true;
8969
8970 case MEM:
8971 /* If outer-code was a sign or zero extension, a cost
8972 of COSTS_N_INSNS (1) was already added in. This is
8973 why we are subtracting it back out. */
8974 if (outer_code == ZERO_EXTEND)
8975 {
8976 *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
8977 }
8978 else if (outer_code == SIGN_EXTEND)
8979 {
8980 *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
8981 }
8982 else if (float_mode_p)
8983 {
8984 *total = sparc_costs->float_load;
8985 }
8986 else
8987 {
8988 *total = sparc_costs->int_load;
8989 }
8990
8991 return true;
8992
8993 case PLUS:
8994 case MINUS:
8995 if (float_mode_p)
8996 *total = sparc_costs->float_plusminus;
8997 else
8998 *total = COSTS_N_INSNS (1);
8999 return false;
9000
9001 case MULT:
9002 if (float_mode_p)
9003 *total = sparc_costs->float_mul;
9004 else if (! TARGET_HARD_MUL)
9005 *total = COSTS_N_INSNS (25);
9006 else
9007 {
9008 int bit_cost;
9009
9010 bit_cost = 0;
9011 if (sparc_costs->int_mul_bit_factor)
9012 {
9013 int nbits;
9014
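/* Count the bits set in the constant multiplier; each step of "value &= value - 1" clears the lowest set bit. */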
9015 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
9016 {
9017 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
9018 for (nbits = 0; value != 0; value &= value - 1)
9019 nbits++;
9020 }
9021 else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
9022 && GET_MODE (XEXP (x, 1)) == VOIDmode)
9023 {
9024 rtx x1 = XEXP (x, 1);
9025 unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
9026 unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);
9027
9028 for (nbits = 0; value1 != 0; value1 &= value1 - 1)
9029 nbits++;
9030 for (; value2 != 0; value2 &= value2 - 1)
9031 nbits++;
9032 }
9033 else
9034 nbits = 7;
9035
9036 if (nbits < 3)
9037 nbits = 3;
9038 bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
9039 bit_cost = COSTS_N_INSNS (bit_cost);
9040 }
9041
9042 if (mode == DImode)
9043 *total = sparc_costs->int_mulX + bit_cost;
9044 else
9045 *total = sparc_costs->int_mul + bit_cost;
9046 }
9047 return false;
9048
9049 case ASHIFT:
9050 case ASHIFTRT:
9051 case LSHIFTRT:
9052 *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
9053 return false;
9054
9055 case DIV:
9056 case UDIV:
9057 case MOD:
9058 case UMOD:
9059 if (float_mode_p)
9060 {
9061 if (mode == DFmode)
9062 *total = sparc_costs->float_div_df;
9063 else
9064 *total = sparc_costs->float_div_sf;
9065 }
9066 else
9067 {
9068 if (mode == DImode)
9069 *total = sparc_costs->int_divX;
9070 else
9071 *total = sparc_costs->int_div;
9072 }
9073 return false;
9074
9075 case NEG:
9076 if (! float_mode_p)
9077 {
9078 *total = COSTS_N_INSNS (1);
9079 return false;
9080 }
9081 /* FALLTHRU */
9082
9083 case ABS:
9084 case FLOAT:
9085 case UNSIGNED_FLOAT:
9086 case FIX:
9087 case UNSIGNED_FIX:
9088 case FLOAT_EXTEND:
9089 case FLOAT_TRUNCATE:
9090 *total = sparc_costs->float_move;
9091 return false;
9092
9093 case SQRT:
9094 if (mode == DFmode)
9095 *total = sparc_costs->float_sqrt_df;
9096 else
9097 *total = sparc_costs->float_sqrt_sf;
9098 return false;
9099
9100 case COMPARE:
9101 if (float_mode_p)
9102 *total = sparc_costs->float_cmp;
9103 else
9104 *total = COSTS_N_INSNS (1);
9105 return false;
9106
9107 case IF_THEN_ELSE:
9108 if (float_mode_p)
9109 *total = sparc_costs->float_cmove;
9110 else
9111 *total = sparc_costs->int_cmove;
9112 return false;
9113
9114 case IOR:
9115 /* Handle the NAND vector patterns. */
9116 if (sparc_vector_mode_supported_p (GET_MODE (x))
9117 && GET_CODE (XEXP (x, 0)) == NOT
9118 && GET_CODE (XEXP (x, 1)) == NOT)
9119 {
9120 *total = COSTS_N_INSNS (1);
9121 return true;
9122 }
9123 else
9124 return false;
9125
9126 default:
9127 return false;
9128 }
9129 }
9130
9131 /* Return true if CLASS is either GENERAL_REGS or I64_REGS. */
9132
9133 static inline bool
9134 general_or_i64_p (reg_class_t rclass)
9135 {
9136 return (rclass == GENERAL_REGS || rclass == I64_REGS);
9137 }
9138
9139 /* Implement TARGET_REGISTER_MOVE_COST. */
9140
9141 static int
9142 sparc_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9143 reg_class_t from, reg_class_t to)
9144 {
9145 if ((FP_REG_CLASS_P (from) && general_or_i64_p (to))
9146 || (general_or_i64_p (from) && FP_REG_CLASS_P (to))
9147 || from == FPCC_REGS
9148 || to == FPCC_REGS)
9149 {
9150 if (sparc_cpu == PROCESSOR_ULTRASPARC
9151 || sparc_cpu == PROCESSOR_ULTRASPARC3
9152 || sparc_cpu == PROCESSOR_NIAGARA
9153 || sparc_cpu == PROCESSOR_NIAGARA2)
9154 return 12;
9155
9156 return 6;
9157 }
9158
9159 return 2;
9160 }
9161
9162 /* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
9163 This is achieved by means of a manual dynamic stack space allocation in
9164 the current frame. We make the assumption that SEQ doesn't contain any
9165 function calls, with the possible exception of calls to the GOT helper. */
9166
9167 static void
9168 emit_and_preserve (rtx seq, rtx reg, rtx reg2)
9169 {
9170 /* We must preserve the lowest 16 words for the register save area. */
9171 HOST_WIDE_INT offset = 16 * UNITS_PER_WORD;
9172 /* We really need only 2 words of fresh stack space. */
9173 HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2 * UNITS_PER_WORD);
9174
9175 rtx slot
9176 = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
9177 SPARC_STACK_BIAS + offset));
9178
9179 emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
9180 emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
9181 if (reg2)
9182 emit_insn (gen_rtx_SET (VOIDmode,
9183 adjust_address (slot, word_mode, UNITS_PER_WORD),
9184 reg2));
9185 emit_insn (seq);
9186 if (reg2)
9187 emit_insn (gen_rtx_SET (VOIDmode,
9188 reg2,
9189 adjust_address (slot, word_mode, UNITS_PER_WORD)));
9190 emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
9191 emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
9192 }
9193
9194 /* Output the assembler code for a thunk function. THUNK_DECL is the
9195 declaration for the thunk function itself, FUNCTION is the decl for
9196 the target function. DELTA is an immediate constant offset to be
9197 added to THIS. If VCALL_OFFSET is nonzero, the word at address
9198 (*THIS + VCALL_OFFSET) should be additionally added to THIS. */
9199
9200 static void
9201 sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
9202 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9203 tree function)
9204 {
9205 rtx this_rtx, insn, funexp;
9206 unsigned int int_arg_first;
9207
9208 reload_completed = 1;
9209 epilogue_completed = 1;
9210
9211 emit_note (NOTE_INSN_PROLOGUE_END);
9212
9213 if (flag_delayed_branch)
9214 {
9215 /* We will emit a regular sibcall below, so we need to instruct
9216 output_sibcall that we are in a leaf function. */
9217 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;
9218
9219 /* This will cause final.c to invoke leaf_renumber_regs so we
9220 must behave as if we were in a not-yet-leafified function. */
9221 int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
9222 }
9223 else
9224 {
9225 /* We will emit the sibcall manually below, so we will need to
9226 manually spill non-leaf registers. */
9227 sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;
9228
9229 /* We really are in a leaf function. */
9230 int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
9231 }
9232
9233 /* Find the "this" pointer. Normally in %o0, but in ARCH64 if the function
9234 returns a structure, the structure return pointer is there instead. */
9235 if (TARGET_ARCH64
9236 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9237 this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
9238 else
9239 this_rtx = gen_rtx_REG (Pmode, int_arg_first);
9240
9241 /* Add DELTA. When possible use a plain add, otherwise load it into
9242 a register first. */
9243 if (delta)
9244 {
9245 rtx delta_rtx = GEN_INT (delta);
9246
9247 if (! SPARC_SIMM13_P (delta))
9248 {
9249 rtx scratch = gen_rtx_REG (Pmode, 1);
9250 emit_move_insn (scratch, delta_rtx);
9251 delta_rtx = scratch;
9252 }
9253
9254 /* THIS_RTX += DELTA. */
9255 emit_insn (gen_add2_insn (this_rtx, delta_rtx));
9256 }
9257
9258 /* Add the word at address (*THIS_RTX + VCALL_OFFSET). */
9259 if (vcall_offset)
9260 {
9261 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9262 rtx scratch = gen_rtx_REG (Pmode, 1);
9263
9264 gcc_assert (vcall_offset < 0);
9265
9266 /* SCRATCH = *THIS_RTX. */
9267 emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));
9268
9269 /* Prepare for adding VCALL_OFFSET. The difficulty is that we
9270 may not have any available scratch register at this point. */
9271 if (SPARC_SIMM13_P (vcall_offset))
9272 ;
9273 /* This is the case if ARCH64 (unless -ffixed-g5 is passed). */
9274 else if (! fixed_regs[5]
9275 /* The below sequence is made up of at least 2 insns,
9276 while the default method may need only one. */
9277 && vcall_offset < -8192)
9278 {
9279 rtx scratch2 = gen_rtx_REG (Pmode, 5);
9280 emit_move_insn (scratch2, vcall_offset_rtx);
9281 vcall_offset_rtx = scratch2;
9282 }
9283 else
9284 {
9285 rtx increment = GEN_INT (-4096);
9286
9287 /* VCALL_OFFSET is a negative number whose typical range can be
9288 estimated as -32768..0 in 32-bit mode. In almost all cases
9289 it is therefore cheaper to emit multiple add insns than
9290 spilling and loading the constant into a register (at least
9291 6 insns). */
9292 while (! SPARC_SIMM13_P (vcall_offset))
9293 {
9294 emit_insn (gen_add2_insn (scratch, increment));
9295 vcall_offset += 4096;
9296 }
9297 vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
9298 }
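/* Worked example (illustrative): VCALL_OFFSET == -10000 needs two adds
   of -4096, leaving a residual offset of -1808, which satisfies
   SPARC_SIMM13_P; the load below then needs no extra register.  */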
9299
9300 /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET). */
9301 emit_move_insn (scratch, gen_rtx_MEM (Pmode,
9302 gen_rtx_PLUS (Pmode,
9303 scratch,
9304 vcall_offset_rtx)));
9305
9306 /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET). */
9307 emit_insn (gen_add2_insn (this_rtx, scratch));
9308 }
9309
9310 /* Generate a tail call to the target function. */
9311 if (! TREE_USED (function))
9312 {
9313 assemble_external (function);
9314 TREE_USED (function) = 1;
9315 }
9316 funexp = XEXP (DECL_RTL (function), 0);
9317
9318 if (flag_delayed_branch)
9319 {
9320 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9321 insn = emit_call_insn (gen_sibcall (funexp));
9322 SIBLING_CALL_P (insn) = 1;
9323 }
9324 else
9325 {
9326 /* The hoops we have to jump through in order to generate a sibcall
9327 without using delay slots... */
9328 rtx spill_reg, seq, scratch = gen_rtx_REG (Pmode, 1);
9329
9330 if (flag_pic)
9331 {
9332 spill_reg = gen_rtx_REG (word_mode, 15); /* %o7 */
9333 start_sequence ();
9334 /* Delay emitting the GOT helper function because it needs to
9335 change the section and we are emitting assembly code. */
9336 load_got_register (); /* clobbers %o7 */
9337 scratch = sparc_legitimize_pic_address (funexp, scratch);
9338 seq = get_insns ();
9339 end_sequence ();
9340 emit_and_preserve (seq, spill_reg, pic_offset_table_rtx);
9341 }
9342 else if (TARGET_ARCH32)
9343 {
9344 emit_insn (gen_rtx_SET (VOIDmode,
9345 scratch,
9346 gen_rtx_HIGH (SImode, funexp)));
9347 emit_insn (gen_rtx_SET (VOIDmode,
9348 scratch,
9349 gen_rtx_LO_SUM (SImode, scratch, funexp)));
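          /* Illustratively, the two SETs above assemble to
                sethi  %hi(function), %g1
                or     %g1, %lo(function), %g1
             and feed the indirect jump through %g1 emitted below.  */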
9350 }
9351 else /* TARGET_ARCH64 */
9352 {
9353 switch (sparc_cmodel)
9354 {
9355 case CM_MEDLOW:
9356 case CM_MEDMID:
9357 /* The destination can serve as a temporary. */
9358 sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
9359 break;
9360
9361 case CM_MEDANY:
9362 case CM_EMBMEDANY:
9363 /* The destination cannot serve as a temporary. */
9364 spill_reg = gen_rtx_REG (DImode, 15); /* %o7 */
9365 start_sequence ();
9366 sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
9367 seq = get_insns ();
9368 end_sequence ();
9369 emit_and_preserve (seq, spill_reg, 0);
9370 break;
9371
9372 default:
9373 gcc_unreachable ();
9374 }
9375 }
9376
9377 emit_jump_insn (gen_indirect_jump (scratch));
9378 }
9379
9380 emit_barrier ();
9381
9382 /* Run just enough of rest_of_compilation to get the insns emitted.
9383 There's not really enough bulk here to make other passes such as
9384 instruction scheduling worthwhile. Note that use_thunk calls
9385 assemble_start_function and assemble_end_function. */
9386 insn = get_insns ();
9387 insn_locators_alloc ();
9388 shorten_branches (insn);
9389 final_start_function (insn, file, 1);
9390 final (insn, file, 1);
9391 final_end_function ();
9392
9393 reload_completed = 0;
9394 epilogue_completed = 0;
9395 }
9396
9397 /* Return true if sparc_output_mi_thunk would be able to output the
9398 assembler code for the thunk function specified by the arguments
9399 it is passed, and false otherwise. */
9400 static bool
9401 sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
9402 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
9403 HOST_WIDE_INT vcall_offset,
9404 const_tree function ATTRIBUTE_UNUSED)
9405 {
9406 /* Bound the loop used in the default method above. */
9407 return (vcall_offset >= -32768 || ! fixed_regs[5]);
9408 }
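/* Illustrative reading: the -32768 bound limits the add-of--4096 loop
   in sparc_output_mi_thunk to at most 7 iterations when %g5 cannot be
   used as a second scratch register.  */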
9409
9410 /* How to allocate a 'struct machine_function'. */
9411
9412 static struct machine_function *
9413 sparc_init_machine_status (void)
9414 {
9415 return ggc_alloc_cleared_machine_function ();
9416 }
9417
9418 /* Locate some local-dynamic symbol still in use by this function
9419 so that we can print its name in local-dynamic base patterns. */
9420
9421 static const char *
9422 get_some_local_dynamic_name (void)
9423 {
9424 rtx insn;
9425
9426 if (cfun->machine->some_ld_name)
9427 return cfun->machine->some_ld_name;
9428
9429 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
9430 if (INSN_P (insn)
9431 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9432 return cfun->machine->some_ld_name;
9433
9434 gcc_unreachable ();
9435 }
9436
9437 static int
9438 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9439 {
9440 rtx x = *px;
9441
9442 if (x
9443 && GET_CODE (x) == SYMBOL_REF
9444 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9445 {
9446 cfun->machine->some_ld_name = XSTR (x, 0);
9447 return 1;
9448 }
9449
9450 return 0;
9451 }
9452
9453 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
9454 This is called from dwarf2out.c to emit call frame instructions
9455 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
9456 static void
9457 sparc_dwarf_handle_frame_unspec (const char *label,
9458 rtx pattern ATTRIBUTE_UNUSED,
9459 int index ATTRIBUTE_UNUSED)
9460 {
9461 gcc_assert (index == UNSPECV_SAVEW);
9462 dwarf2out_window_save (label);
9463 }
9464
9465 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9466 We need to emit DTP-relative relocations. */
9467
9468 static void
9469 sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
9470 {
9471 switch (size)
9472 {
9473 case 4:
9474 fputs ("\t.word\t%r_tls_dtpoff32(", file);
9475 break;
9476 case 8:
9477 fputs ("\t.xword\t%r_tls_dtpoff64(", file);
9478 break;
9479 default:
9480 gcc_unreachable ();
9481 }
9482 output_addr_const (file, x);
9483 fputs (")", file);
9484 }
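/* For example (illustrative), SIZE == 4 with X referring to symbol
   "foo" emits:  .word  %r_tls_dtpoff32(foo)  */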
9485
9486 /* Do whatever processing is required at the end of a file. */
9487
9488 static void
9489 sparc_file_end (void)
9490 {
9491 /* If we need to emit the special GOT helper function, do so now. */
9492 if (got_helper_rtx)
9493 {
9494 const char *name = XSTR (got_helper_rtx, 0);
9495 const char *reg_name = reg_names[GLOBAL_OFFSET_TABLE_REGNUM];
9496 #ifdef DWARF2_UNWIND_INFO
9497 bool do_cfi;
9498 #endif
9499
9500 if (USE_HIDDEN_LINKONCE)
9501 {
9502 tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
9503 get_identifier (name),
9504 build_function_type (void_type_node,
9505 void_list_node));
9506 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
9507 NULL_TREE, void_type_node);
9508 TREE_STATIC (decl) = 1;
9509 make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
9510 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
9511 DECL_VISIBILITY_SPECIFIED (decl) = 1;
9512 resolve_unique_section (decl, 0, flag_function_sections);
9513 allocate_struct_function (decl, true);
9514 cfun->is_thunk = 1;
9515 current_function_decl = decl;
9516 init_varasm_status ();
9517 assemble_start_function (decl, name);
9518 }
9519 else
9520 {
9521 const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
9522 switch_to_section (text_section);
9523 if (align > 0)
9524 ASM_OUTPUT_ALIGN (asm_out_file, align);
9525 ASM_OUTPUT_LABEL (asm_out_file, name);
9526 }
9527
9528 #ifdef DWARF2_UNWIND_INFO
9529 do_cfi = dwarf2out_do_cfi_asm ();
9530 if (do_cfi)
9531 fprintf (asm_out_file, "\t.cfi_startproc\n");
9532 #endif
9533 if (flag_delayed_branch)
9534 fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
9535 reg_name, reg_name);
9536 else
9537 fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
9538 reg_name, reg_name);
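      /* Illustrative reading: the CALL that reaches this helper leaves
         its own address in %o7, so adding %o7 to the PC-relative
         displacement already in the GOT register produces the final
         GOT address, and "jmp %o7+8" returns past the caller's delay
         slot.  */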
9539 #ifdef DWARF2_UNWIND_INFO
9540 if (do_cfi)
9541 fprintf (asm_out_file, "\t.cfi_endproc\n");
9542 #endif
9543 }
9544
9545 if (NEED_INDICATE_EXEC_STACK)
9546 file_end_indicate_exec_stack ();
9547 }
9548
9549 #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
9550 /* Implement TARGET_MANGLE_TYPE. */
9551
9552 static const char *
9553 sparc_mangle_type (const_tree type)
9554 {
9555 if (!TARGET_64BIT
9556 && TYPE_MAIN_VARIANT (type) == long_double_type_node
9557 && TARGET_LONG_DOUBLE_128)
9558 return "g";
9559
9560 /* For all other types, use normal C++ mangling. */
9561 return NULL;
9562 }
9563 #endif
9564
9565 /* Expand code to perform an 8-bit or 16-bit compare and swap by doing
9566 a 32-bit compare and swap on the word containing the byte or half-word. */
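/* In outline (an illustrative sketch, not the literal RTL; "cas32"
   stands for the 32-bit compare-and-swap emitted below):

     addr = mem & ~3;  off = bit offset of the sub-word (big-endian);
     mask = (0xff or 0xffff) << off;
     val  = *addr & ~mask;                     -- background bits
   loop:
     res = cas32 (addr, oldv | val, newv | val);
     if (res == (oldv | val)) goto done;       -- success
     if ((res & ~mask) != val)                 -- background changed
       { val = res & ~mask; goto loop; }
   done:
     result = (res & mask) >> off;  */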
9567
9568 void
9569 sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
9570 {
9571 rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
9572 rtx addr = gen_reg_rtx (Pmode);
9573 rtx off = gen_reg_rtx (SImode);
9574 rtx oldv = gen_reg_rtx (SImode);
9575 rtx newv = gen_reg_rtx (SImode);
9576 rtx oldvalue = gen_reg_rtx (SImode);
9577 rtx newvalue = gen_reg_rtx (SImode);
9578 rtx res = gen_reg_rtx (SImode);
9579 rtx resv = gen_reg_rtx (SImode);
9580 rtx memsi, val, mask, end_label, loop_label, cc;
9581
9582 emit_insn (gen_rtx_SET (VOIDmode, addr,
9583 gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));
9584
9585 if (Pmode != SImode)
9586 addr1 = gen_lowpart (SImode, addr1);
9587 emit_insn (gen_rtx_SET (VOIDmode, off,
9588 gen_rtx_AND (SImode, addr1, GEN_INT (3))));
9589
9590 memsi = gen_rtx_MEM (SImode, addr);
9591 set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
9592 MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
9593
9594 val = force_reg (SImode, memsi);
9595
9596 emit_insn (gen_rtx_SET (VOIDmode, off,
9597 gen_rtx_XOR (SImode, off,
9598 GEN_INT (GET_MODE (mem) == QImode
9599 ? 3 : 2))));
9600
9601 emit_insn (gen_rtx_SET (VOIDmode, off,
9602 gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));
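/* Example (illustrative): on big-endian SPARC, a QImode access to byte 3
   of its word gives off = (3 ^ 3) * 8 = 0, the least significant byte,
   whereas byte 0 gives off = (0 ^ 3) * 8 = 24, the most significant.  */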
9603
9604 if (GET_MODE (mem) == QImode)
9605 mask = force_reg (SImode, GEN_INT (0xff));
9606 else
9607 mask = force_reg (SImode, GEN_INT (0xffff));
9608
9609 emit_insn (gen_rtx_SET (VOIDmode, mask,
9610 gen_rtx_ASHIFT (SImode, mask, off)));
9611
9612 emit_insn (gen_rtx_SET (VOIDmode, val,
9613 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9614 val)));
9615
9616 oldval = gen_lowpart (SImode, oldval);
9617 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9618 gen_rtx_ASHIFT (SImode, oldval, off)));
9619
9620 newval = gen_lowpart_common (SImode, newval);
9621 emit_insn (gen_rtx_SET (VOIDmode, newv,
9622 gen_rtx_ASHIFT (SImode, newval, off)));
9623
9624 emit_insn (gen_rtx_SET (VOIDmode, oldv,
9625 gen_rtx_AND (SImode, oldv, mask)));
9626
9627 emit_insn (gen_rtx_SET (VOIDmode, newv,
9628 gen_rtx_AND (SImode, newv, mask)));
9629
9630 end_label = gen_label_rtx ();
9631 loop_label = gen_label_rtx ();
9632 emit_label (loop_label);
9633
9634 emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
9635 gen_rtx_IOR (SImode, oldv, val)));
9636
9637 emit_insn (gen_rtx_SET (VOIDmode, newvalue,
9638 gen_rtx_IOR (SImode, newv, val)));
9639
9640 emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));
9641
9642 emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);
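/* The compare-and-swap can fail for two reasons: if only bits outside
   MASK changed, refresh VAL with the new background bits and retry;
   if the sub-word itself no longer matches OLDV, fall through to
   END_LABEL with the failing value left in RES.  */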
9643
9644 emit_insn (gen_rtx_SET (VOIDmode, resv,
9645 gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
9646 res)));
9647
9648 cc = gen_compare_reg_1 (NE, resv, val);
9649 emit_insn (gen_rtx_SET (VOIDmode, val, resv));
9650
9651 /* Use cbranchcc4 to separate the compare and branch! */
9652 emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
9653 cc, const0_rtx, loop_label));
9654
9655 emit_label (end_label);
9656
9657 emit_insn (gen_rtx_SET (VOIDmode, res,
9658 gen_rtx_AND (SImode, res, mask)));
9659
9660 emit_insn (gen_rtx_SET (VOIDmode, res,
9661 gen_rtx_LSHIFTRT (SImode, res, off)));
9662
9663 emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
9664 }
9665
9666 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
9667
9668 bool
9669 sparc_frame_pointer_required (void)
9670 {
9671 return !(leaf_function_p () && only_leaf_regs_used ());
9672 }
9673
9674 /* The way this is structured, we can't eliminate SFP in favor of SP
9675 if the frame pointer is required: we want to use the SFP->HFP elimination
9676 in that case. But the test in update_eliminables doesn't know that we
9677 assume below that only the former elimination is performed. */
9678
9679 bool
9680 sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
9681 {
9682 return (to == HARD_FRAME_POINTER_REGNUM
9683 || !targetm.frame_pointer_required ());
9684 }
9685
9686 /* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
9687 they won't be allocated. */
9688
9689 static void
9690 sparc_conditional_register_usage (void)
9691 {
9692 if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
9693 {
9694 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9695 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
9696 }
9697 /* If the user has passed -f{fixed,call-{used,saved}}-g5,
9698 then honor it. */
9699 if (TARGET_ARCH32 && fixed_regs[5])
9700 fixed_regs[5] = 1;
9701 else if (TARGET_ARCH64 && fixed_regs[5] == 2)
9702 fixed_regs[5] = 0;
9703 if (! TARGET_V9)
9704 {
9705 int regno;
9706 for (regno = SPARC_FIRST_V9_FP_REG;
9707 regno <= SPARC_LAST_V9_FP_REG;
9708 regno++)
9709 fixed_regs[regno] = 1;
9710 /* %fcc0 is used by v8 and v9. */
9711 for (regno = SPARC_FIRST_V9_FCC_REG + 1;
9712 regno <= SPARC_LAST_V9_FCC_REG;
9713 regno++)
9714 fixed_regs[regno] = 1;
9715 }
9716 if (! TARGET_FPU)
9717 {
9718 int regno;
9719 for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++)
9720 fixed_regs[regno] = 1;
9721 }
9722 /* If the user has passed -f{fixed,call-{used,saved}}-g2,
9723 then honor it. Likewise with g3 and g4. */
9724 if (fixed_regs[2] == 2)
9725 fixed_regs[2] = ! TARGET_APP_REGS;
9726 if (fixed_regs[3] == 2)
9727 fixed_regs[3] = ! TARGET_APP_REGS;
9728 if (TARGET_ARCH32 && fixed_regs[4] == 2)
9729 fixed_regs[4] = ! TARGET_APP_REGS;
9730 else if (TARGET_CM_EMBMEDANY)
9731 fixed_regs[4] = 1;
9732 else if (fixed_regs[4] == 2)
9733 fixed_regs[4] = 0;
9734 }
9735
9736 /* Implement TARGET_PREFERRED_RELOAD_CLASS
9737
9738 - We can't load constants into FP registers.
9739 - We can't load FP constants into integer registers when soft-float,
9740 because there is no soft-float pattern with a r/F constraint.
9741 - We can't load FP constants into integer registers for TFmode unless
9742 it is 0.0L, because there is no movtf pattern with a r/F constraint.
9743 - Try to reload integer constants (symbolic or otherwise) back into
9744 registers directly, rather than having them dumped to memory. */
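/* For example (illustrative), any constant requested in FP_REGS yields
   NO_REGS, steering such reloads away from the FP registers, while a
   symbolic SImode or DImode constant requested in ALL_REGS is narrowed
   to GENERAL_REGS.  */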
9745
9746 static reg_class_t
9747 sparc_preferred_reload_class (rtx x, reg_class_t rclass)
9748 {
9749 if (CONSTANT_P (x))
9750 {
9751 if (FP_REG_CLASS_P (rclass)
9752 || rclass == GENERAL_OR_FP_REGS
9753 || rclass == GENERAL_OR_EXTRA_FP_REGS
9754 || (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT && ! TARGET_FPU)
9755 || (GET_MODE (x) == TFmode && ! const_zero_operand (x, TFmode)))
9756 return NO_REGS;
9757
9758 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
9759 return GENERAL_REGS;
9760 }
9761
9762 return rclass;
9763 }
9764
9765 #include "gt-sparc.h"