1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
14 any later version.
15
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
25
26 #include "config.h"
27 #include "system.h"
28 #include "coretypes.h"
29 #include "tm.h"
30 #include <signal.h>
31 #include "rtl.h"
32 #include "regs.h"
33 #include "hard-reg-set.h"
34 #include "real.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
38 #include "recog.h"
39 #include "toplev.h"
40 #include "output.h"
41 #include "tree.h"
42 #include "function.h"
43 #include "expr.h"
44 #include "optabs.h"
45 #include "flags.h"
46 #include "reload.h"
47 #include "tm_p.h"
48 #include "ggc.h"
49 #include "gstab.h"
50 #include "hashtab.h"
51 #include "debug.h"
52 #include "target.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
59 #include "bitmap.h"
60
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
69 XVECEXP (X, 0, 0)
70
71 /* Extract the symbol type from UNSPEC wrapper X. */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
74
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save & restore registers.
77
78 Use a maximum gap of 0x100 in the mips16 case. We can then use
79 unextended instructions to save and restore registers, and to
80 allocate and deallocate the top part of the frame.
81
82 The value in the !mips16 case must be a SMALL_OPERAND and must
83 preserve the maximum stack alignment. */
84 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
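/* As an illustrative note (assuming a maximum stack alignment of 16
   bytes): 0x7ff0 is the largest multiple of 16 that fits in a signed
   16-bit immediate, so a single addiu or daddiu can adjust $sp by up
   to this amount while preserving that alignment. */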
85
86 /* True if INSN is a mips.md pattern or asm statement. */
87 #define USEFUL_INSN_P(INSN) \
88 (INSN_P (INSN) \
89 && GET_CODE (PATTERN (INSN)) != USE \
90 && GET_CODE (PATTERN (INSN)) != CLOBBER \
91 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
92 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93
94 /* If INSN is a delayed branch sequence, return the first instruction
95 in the sequence; otherwise return INSN itself. */
96 #define SEQ_BEGIN(INSN) \
97 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
98 ? XVECEXP (PATTERN (INSN), 0, 0) \
99 : (INSN))
100
101 /* Likewise for the last instruction in a delayed branch sequence. */
102 #define SEQ_END(INSN) \
103 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
104 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
105 : (INSN))
106
107 /* Execute the following loop body with SUBINSN set to each instruction
108 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
109 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
110 for ((SUBINSN) = SEQ_BEGIN (INSN); \
111 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
112 (SUBINSN) = NEXT_INSN (SUBINSN))
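/* A minimal usage sketch (the variables are hypothetical): visit INSN
   itself or, when INSN is a delayed-branch SEQUENCE, each instruction
   in that sequence:

       rtx subinsn;

       FOR_EACH_SUBINSN (subinsn, insn)
         if (USEFUL_INSN_P (subinsn))
           ...;  */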
113
114 /* Classifies an address.
115
116 ADDRESS_REG
117 A natural register + offset address. The register satisfies
118 mips_valid_base_register_p and the offset is a const_arith_operand.
119
120 ADDRESS_LO_SUM
121 A LO_SUM rtx. The first operand is a valid base register and
122 the second operand is a symbolic address.
123
124 ADDRESS_CONST_INT
125 A signed 16-bit constant address.
126
127 ADDRESS_SYMBOLIC
128 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
129 enum mips_address_type {
130 ADDRESS_REG,
131 ADDRESS_LO_SUM,
132 ADDRESS_CONST_INT,
133 ADDRESS_SYMBOLIC
134 };
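/* For illustration (operands are hypothetical), the classes above
   correspond to addresses such as:

   ADDRESS_REG        (plus (reg $sp) (const_int 16))
   ADDRESS_LO_SUM     (lo_sum (reg $at) (symbol_ref "foo"))
   ADDRESS_CONST_INT  (const_int 400)
   ADDRESS_SYMBOLIC   (symbol_ref "foo") */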
135
136 /* Classifies the prototype of a builtin function. */
137 enum mips_function_type
138 {
139 MIPS_V2SF_FTYPE_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
142 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
143 MIPS_V2SF_FTYPE_SF_SF,
144 MIPS_INT_FTYPE_V2SF_V2SF,
145 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
146 MIPS_INT_FTYPE_SF_SF,
147 MIPS_INT_FTYPE_DF_DF,
148 MIPS_SF_FTYPE_V2SF,
149 MIPS_SF_FTYPE_SF,
150 MIPS_SF_FTYPE_SF_SF,
151 MIPS_DF_FTYPE_DF,
152 MIPS_DF_FTYPE_DF_DF,
153
154 /* For MIPS DSP ASE */
155 MIPS_DI_FTYPE_DI_SI,
156 MIPS_DI_FTYPE_DI_SI_SI,
157 MIPS_DI_FTYPE_DI_V2HI_V2HI,
158 MIPS_DI_FTYPE_DI_V4QI_V4QI,
159 MIPS_SI_FTYPE_DI_SI,
160 MIPS_SI_FTYPE_PTR_SI,
161 MIPS_SI_FTYPE_SI,
162 MIPS_SI_FTYPE_SI_SI,
163 MIPS_SI_FTYPE_V2HI,
164 MIPS_SI_FTYPE_V2HI_V2HI,
165 MIPS_SI_FTYPE_V4QI,
166 MIPS_SI_FTYPE_V4QI_V4QI,
167 MIPS_SI_FTYPE_VOID,
168 MIPS_V2HI_FTYPE_SI,
169 MIPS_V2HI_FTYPE_SI_SI,
170 MIPS_V2HI_FTYPE_V2HI,
171 MIPS_V2HI_FTYPE_V2HI_SI,
172 MIPS_V2HI_FTYPE_V2HI_V2HI,
173 MIPS_V2HI_FTYPE_V4QI,
174 MIPS_V2HI_FTYPE_V4QI_V2HI,
175 MIPS_V4QI_FTYPE_SI,
176 MIPS_V4QI_FTYPE_V2HI_V2HI,
177 MIPS_V4QI_FTYPE_V4QI_SI,
178 MIPS_V4QI_FTYPE_V4QI_V4QI,
179 MIPS_VOID_FTYPE_SI_SI,
180 MIPS_VOID_FTYPE_V2HI_V2HI,
181 MIPS_VOID_FTYPE_V4QI_V4QI,
182
183 /* The last type. */
184 MIPS_MAX_FTYPE_MAX
185 };
186
187 /* Specifies how a builtin function should be converted into rtl. */
188 enum mips_builtin_type
189 {
190 /* The builtin corresponds directly to an .md pattern. The return
191 value is mapped to operand 0 and the arguments are mapped to
192 operands 1 and above. */
193 MIPS_BUILTIN_DIRECT,
194
195 /* The builtin corresponds directly to an .md pattern. There is no return
196 value and the arguments are mapped to operands 0 and above. */
197 MIPS_BUILTIN_DIRECT_NO_TARGET,
198
199 /* The builtin corresponds to a comparison instruction followed by
200 a mips_cond_move_tf_ps pattern. The first two arguments are the
201 values to compare and the second two arguments are the vector
202 operands for the movt.ps or movf.ps instruction (in assembly order). */
203 MIPS_BUILTIN_MOVF,
204 MIPS_BUILTIN_MOVT,
205
206 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
207 of this instruction is the result of the comparison, which has mode
208 CCV2 or CCV4. The function arguments are mapped to operands 1 and
209 above. The function's return value is an SImode boolean that is
210 true under the following conditions:
211
212 MIPS_BUILTIN_CMP_ANY: one of the registers is true
213 MIPS_BUILTIN_CMP_ALL: all of the registers are true
214 MIPS_BUILTIN_CMP_LOWER: the first register is true
215 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
216 MIPS_BUILTIN_CMP_ANY,
217 MIPS_BUILTIN_CMP_ALL,
218 MIPS_BUILTIN_CMP_UPPER,
219 MIPS_BUILTIN_CMP_LOWER,
220
221 /* As above, but the instruction only sets a single $fcc register. */
222 MIPS_BUILTIN_CMP_SINGLE,
223
224 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
225 MIPS_BUILTIN_BPOSGE32
226 };
227
228 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
229 #define MIPS_FP_CONDITIONS(MACRO) \
230 MACRO (f), \
231 MACRO (un), \
232 MACRO (eq), \
233 MACRO (ueq), \
234 MACRO (olt), \
235 MACRO (ult), \
236 MACRO (ole), \
237 MACRO (ule), \
238 MACRO (sf), \
239 MACRO (ngle), \
240 MACRO (seq), \
241 MACRO (ngl), \
242 MACRO (lt), \
243 MACRO (nge), \
244 MACRO (le), \
245 MACRO (ngt)
246
247 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
248 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
249 enum mips_fp_condition {
250 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
251 };
252
253 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
254 #define STRINGIFY(X) #X
255 static const char *const mips_fp_conditions[] = {
256 MIPS_FP_CONDITIONS (STRINGIFY)
257 };
258
259 /* A function to save or restore a register. The first argument is the
260 register and the second is the stack slot. */
261 typedef void (*mips_save_restore_fn) (rtx, rtx);
262
263 struct mips16_constant;
264 struct mips_arg_info;
265 struct mips_address_info;
266 struct mips_integer_op;
267 struct mips_sim;
268
269 static enum mips_symbol_type mips_classify_symbol (rtx);
270 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
271 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
272 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
273 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
274 static bool mips_classify_address (struct mips_address_info *, rtx,
275 enum machine_mode, int);
276 static bool mips_cannot_force_const_mem (rtx);
277 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
278 static int mips_symbol_insns (enum mips_symbol_type);
279 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
280 static rtx mips_force_temporary (rtx, rtx);
281 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
282 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
283 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
284 static unsigned int mips_build_lower (struct mips_integer_op *,
285 unsigned HOST_WIDE_INT);
286 static unsigned int mips_build_integer (struct mips_integer_op *,
287 unsigned HOST_WIDE_INT);
288 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
289 static int m16_check_op (rtx, int, int, int);
290 static bool mips_rtx_costs (rtx, int, int, int *);
291 static int mips_address_cost (rtx);
292 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
293 static void mips_load_call_address (rtx, rtx, int);
294 static bool mips_function_ok_for_sibcall (tree, tree);
295 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
296 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
297 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
298 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
299 tree, int, struct mips_arg_info *);
300 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
301 static void mips_set_architecture (const struct mips_cpu_info *);
302 static void mips_set_tune (const struct mips_cpu_info *);
303 static bool mips_handle_option (size_t, const char *, int);
304 static struct machine_function *mips_init_machine_status (void);
305 static void print_operand_reloc (FILE *, rtx, const char **);
306 #if TARGET_IRIX
307 static void irix_output_external_libcall (rtx);
308 #endif
309 static void mips_file_start (void);
310 static void mips_file_end (void);
311 static bool mips_rewrite_small_data_p (rtx);
312 static int mips_small_data_pattern_1 (rtx *, void *);
313 static int mips_rewrite_small_data_1 (rtx *, void *);
314 static bool mips_function_has_gp_insn (void);
315 static unsigned int mips_global_pointer (void);
316 static bool mips_save_reg_p (unsigned int);
317 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
318 mips_save_restore_fn);
319 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
320 static void mips_output_cplocal (void);
321 static void mips_emit_loadgp (void);
322 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
323 static void mips_set_frame_expr (rtx);
324 static rtx mips_frame_set (rtx, rtx);
325 static void mips_save_reg (rtx, rtx);
326 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
327 static void mips_restore_reg (rtx, rtx);
328 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
329 HOST_WIDE_INT, tree);
330 static int symbolic_expression_p (rtx);
331 static section *mips_select_rtx_section (enum machine_mode, rtx,
332 unsigned HOST_WIDE_INT);
333 static section *mips_function_rodata_section (tree);
334 static bool mips_in_small_data_p (tree);
335 static bool mips_use_anchors_for_symbol_p (rtx);
336 static int mips_fpr_return_fields (tree, tree *);
337 static bool mips_return_in_msb (tree);
338 static rtx mips_return_fpr_pair (enum machine_mode mode,
339 enum machine_mode mode1, HOST_WIDE_INT,
340 enum machine_mode mode2, HOST_WIDE_INT);
341 static rtx mips16_gp_pseudo_reg (void);
342 static void mips16_fp_args (FILE *, int, int);
343 static void build_mips16_function_stub (FILE *);
344 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
345 static void dump_constants (struct mips16_constant *, rtx);
346 static int mips16_insn_length (rtx);
347 static int mips16_rewrite_pool_refs (rtx *, void *);
348 static void mips16_lay_out_constants (void);
349 static void mips_sim_reset (struct mips_sim *);
350 static void mips_sim_init (struct mips_sim *, state_t);
351 static void mips_sim_next_cycle (struct mips_sim *);
352 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
353 static int mips_sim_wait_regs_2 (rtx *, void *);
354 static void mips_sim_wait_regs_1 (rtx *, void *);
355 static void mips_sim_wait_regs (struct mips_sim *, rtx);
356 static void mips_sim_wait_units (struct mips_sim *, rtx);
357 static void mips_sim_wait_insn (struct mips_sim *, rtx);
358 static void mips_sim_record_set (rtx, rtx, void *);
359 static void mips_sim_issue_insn (struct mips_sim *, rtx);
360 static void mips_sim_issue_nop (struct mips_sim *);
361 static void mips_sim_finish_insn (struct mips_sim *, rtx);
362 static void vr4130_avoid_branch_rt_conflict (rtx);
363 static void vr4130_align_insns (void);
364 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
365 static void mips_avoid_hazards (void);
366 static void mips_reorg (void);
367 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
368 static bool mips_matching_cpu_name_p (const char *, const char *);
369 static const struct mips_cpu_info *mips_parse_cpu (const char *);
370 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
371 static bool mips_return_in_memory (tree, tree);
372 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
373 static void mips_macc_chains_record (rtx);
374 static void mips_macc_chains_reorder (rtx *, int);
375 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
376 static bool vr4130_true_reg_dependence_p (rtx);
377 static bool vr4130_swap_insns_p (rtx, rtx);
378 static void vr4130_reorder (rtx *, int);
379 static void mips_promote_ready (rtx *, int, int);
380 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
381 static int mips_variable_issue (FILE *, int, rtx, int);
382 static int mips_adjust_cost (rtx, rtx, rtx, int);
383 static int mips_issue_rate (void);
384 static int mips_multipass_dfa_lookahead (void);
385 static void mips_init_libfuncs (void);
386 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
387 tree, int *, int);
388 static tree mips_build_builtin_va_list (void);
389 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
390 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
391 tree, bool);
392 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
393 tree, bool);
394 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
395 tree, bool);
396 static bool mips_valid_pointer_mode (enum machine_mode);
397 static bool mips_vector_mode_supported_p (enum machine_mode);
398 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
399 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
400 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
401 static void mips_init_builtins (void);
402 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
403 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
404 enum insn_code, enum mips_fp_condition,
405 rtx, tree);
406 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
407 enum insn_code, enum mips_fp_condition,
408 rtx, tree);
409 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
410 static void mips_encode_section_info (tree, rtx, int);
411 static void mips_extra_live_on_entry (bitmap);
412 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
413
414 /* Structure to be filled in by compute_frame_size with register
415 save masks, and offsets for the current function. */
416
417 struct mips_frame_info GTY(())
418 {
419 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
420 HOST_WIDE_INT var_size; /* # bytes that variables take up */
421 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
422 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
423 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
424 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
425 unsigned int mask; /* mask of saved gp registers */
426 unsigned int fmask; /* mask of saved fp registers */
427 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
428 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
429 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
430 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
431 bool initialized; /* true if frame size already calculated */
432 int num_gp; /* number of gp registers saved */
433 int num_fp; /* number of fp registers saved */
434 };
435
436 struct machine_function GTY(()) {
437 /* Pseudo-reg holding the value of $28 in a mips16 function which
438 refers to GP relative global variables. */
439 rtx mips16_gp_pseudo_rtx;
440
441 /* The number of extra stack bytes taken up by register varargs.
442 This area is allocated by the callee at the very top of the frame. */
443 int varargs_size;
444
445 /* Current frame information, calculated by compute_frame_size. */
446 struct mips_frame_info frame;
447
448 /* The register to use as the global pointer within this function. */
449 unsigned int global_pointer;
450
451 /* True if mips_adjust_insn_length should ignore an instruction's
452 hazard attribute. */
453 bool ignore_hazard_length_p;
454
455 /* True if the whole function is suitable for .set noreorder and
456 .set nomacro. */
457 bool all_noreorder_p;
458
459 /* True if the function is known to have an instruction that needs $gp. */
460 bool has_gp_insn_p;
461 };
462
463 /* Information about a single argument. */
464 struct mips_arg_info
465 {
466 /* True if the argument is passed in a floating-point register, or
467 would have been if we hadn't run out of registers. */
468 bool fpr_p;
469
470 /* The number of words passed in registers, rounded up. */
471 unsigned int reg_words;
472
473 /* For EABI, the offset of the first register from GP_ARG_FIRST or
474 FP_ARG_FIRST. For other ABIs, the offset of the first register from
475 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
476 comment for details).
477
478 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
479 on the stack. */
480 unsigned int reg_offset;
481
482 /* The number of words that must be passed on the stack, rounded up. */
483 unsigned int stack_words;
484
485 /* The offset from the start of the stack overflow area of the argument's
486 first stack word. Only meaningful when STACK_WORDS is nonzero. */
487 unsigned int stack_offset;
488 };
489
490
491 /* Information about an address described by mips_address_type.
492
493 ADDRESS_CONST_INT
494 No fields are used.
495
496 ADDRESS_REG
497 REG is the base register and OFFSET is the constant offset.
498
499 ADDRESS_LO_SUM
500 REG is the register that contains the high part of the address,
501 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
502 is the type of OFFSET's symbol.
503
504 ADDRESS_SYMBOLIC
505 SYMBOL_TYPE is the type of symbol being referenced. */
506
507 struct mips_address_info
508 {
509 enum mips_address_type type;
510 rtx reg;
511 rtx offset;
512 enum mips_symbol_type symbol_type;
513 };
514
515
516 /* One stage in a constant building sequence. These sequences have
517 the form:
518
519 A = VALUE[0]
520 A = A CODE[1] VALUE[1]
521 A = A CODE[2] VALUE[2]
522 ...
523
524 where A is an accumulator, each CODE[i] is a binary rtl operation
525 and each VALUE[i] is a constant integer. */
526 struct mips_integer_op {
527 enum rtx_code code;
528 unsigned HOST_WIDE_INT value;
529 };
530
531
532 /* The largest number of operations needed to load an integer constant.
533 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
534 When the lowest bit is clear, we may also try, and then reject, a
535 sequence with an extra SLL at the end, hence the maximum of 7. */
536 #define MIPS_MAX_INTEGER_OPS 7
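/* As a worked example (the constant and register are chosen arbitrarily),
   the 64-bit value 0x1234567890abcdef can be built with the six-operation
   LUI,ORI,SLL,ORI,SLL,ORI sequence described above:

   lui   $at,0x1234        # 0x0000000012340000
   ori   $at,$at,0x5678    # 0x0000000012345678
   dsll  $at,$at,16        # 0x0000123456780000
   ori   $at,$at,0x90ab    # 0x00001234567890ab
   dsll  $at,$at,16        # 0x1234567890ab0000
   ori   $at,$at,0xcdef    # 0x1234567890abcdef */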
537
538
539 /* Global variables for machine-dependent things. */
540
541 /* Threshold for data being put into the small data/bss area, instead
542 of the normal data area. */
543 int mips_section_threshold = -1;
544
545 /* Count the number of .file directives, so that .loc is up to date. */
546 int num_source_filenames = 0;
547
548 /* Count the number of sdb-related labels generated (to find block
549 start and end boundaries). */
550 int sdb_label_count = 0;
551
552 /* Next label # for each statement for Silicon Graphics IRIS systems. */
553 int sym_lineno = 0;
554
555 /* Linked list of all externals that are to be emitted when optimizing
556 for the global pointer, if they haven't been declared by the end of
557 the program with an appropriate .comm or initialization. */
558
559 struct extern_list GTY (())
560 {
561 struct extern_list *next; /* next external */
562 const char *name; /* name of the external */
563 int size; /* size in bytes */
564 };
565
566 static GTY (()) struct extern_list *extern_head = 0;
567
568 /* Name of the file containing the current function. */
569 const char *current_function_file = "";
570
571 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
572 int set_noreorder;
573 int set_noat;
574 int set_nomacro;
575 int set_volatile;
576
577 /* Nonzero if the next branch instruction is a branch likely rather than a normal branch. */
578 int mips_branch_likely;
579
580 /* The operands passed to the last cmpMM expander. */
581 rtx cmp_operands[2];
582
583 /* The target cpu for code generation. */
584 enum processor_type mips_arch;
585 const struct mips_cpu_info *mips_arch_info;
586
587 /* The target cpu for optimization and scheduling. */
588 enum processor_type mips_tune;
589 const struct mips_cpu_info *mips_tune_info;
590
591 /* Which instruction set architecture to use. */
592 int mips_isa;
593
594 /* Which ABI to use. */
595 int mips_abi = MIPS_ABI_DEFAULT;
596
597 /* Cost information to use. */
598 const struct mips_rtx_cost_data *mips_cost;
599
600 /* Whether we are generating mips16 hard float code. In mips16 mode
601 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
602 -msoft-float was not specified by the user, which means that we
603 should arrange to call mips32 hard floating point code. */
604 int mips16_hard_float;
605
606 /* The architecture selected by -mipsN. */
607 static const struct mips_cpu_info *mips_isa_info;
608
609 /* If TRUE, we split addresses into their high and low parts in the RTL. */
610 int mips_split_addresses;
611
612 /* Mode used for saving/restoring general purpose registers. */
613 static enum machine_mode gpr_mode;
614
615 /* Array indicating whether a given hard register can support a
616 given mode. */
617 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
618
619 /* List of all MIPS punctuation characters used by print_operand. */
620 char mips_print_operand_punct[256];
621
622 /* Map GCC register number to debugger register number. */
623 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
624
625 /* A copy of the original flag_delayed_branch: see override_options. */
626 static int mips_flag_delayed_branch;
627
628 static GTY (()) int mips_output_filename_first_time = 1;
629
630 /* mips_split_p[X] is true if symbols of type X can be split by
631 mips_split_symbol(). */
632 bool mips_split_p[NUM_SYMBOL_TYPES];
633
634 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
635 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
636 if they are matched by a special .md file pattern. */
637 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
638
639 /* Likewise for HIGHs. */
640 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
641
642 /* Map hard register number to register class */
643 const enum reg_class mips_regno_to_class[] =
644 {
645 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
646 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
647 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
648 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
649 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
650 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
651 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
652 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
653 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
654 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
655 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
656 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
660 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
661 HI_REG, LO_REG, NO_REGS, ST_REGS,
662 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
663 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
664 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
665 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
666 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
667 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
668 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
669 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
670 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
671 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
672 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
673 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
674 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
675 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
676 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
677 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
678 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
679 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
680 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
681 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
682 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
683 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
684 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
685 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
686 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
687 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
688 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
689 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
690 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
691 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
692 };
693
694 /* Table of machine dependent attributes. */
695 const struct attribute_spec mips_attribute_table[] =
696 {
697 { "long_call", 0, 0, false, true, true, NULL },
698 { NULL, 0, 0, false, false, false, NULL }
699 };
700 \f
701 /* A table describing all the processors gcc knows about. Names are
702 matched in the order listed. The first mention of an ISA level is
703 taken as the canonical name for that ISA.
704
705 To ease comparison, please keep this table in the same order as
706 gas's mips_cpu_info_table[]. */
707 const struct mips_cpu_info mips_cpu_info_table[] = {
708 /* Entries for generic ISAs */
709 { "mips1", PROCESSOR_R3000, 1 },
710 { "mips2", PROCESSOR_R6000, 2 },
711 { "mips3", PROCESSOR_R4000, 3 },
712 { "mips4", PROCESSOR_R8000, 4 },
713 { "mips32", PROCESSOR_4KC, 32 },
714 { "mips32r2", PROCESSOR_M4K, 33 },
715 { "mips64", PROCESSOR_5KC, 64 },
716
717 /* MIPS I */
718 { "r3000", PROCESSOR_R3000, 1 },
719 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
720 { "r3900", PROCESSOR_R3900, 1 },
721
722 /* MIPS II */
723 { "r6000", PROCESSOR_R6000, 2 },
724
725 /* MIPS III */
726 { "r4000", PROCESSOR_R4000, 3 },
727 { "vr4100", PROCESSOR_R4100, 3 },
728 { "vr4111", PROCESSOR_R4111, 3 },
729 { "vr4120", PROCESSOR_R4120, 3 },
730 { "vr4130", PROCESSOR_R4130, 3 },
731 { "vr4300", PROCESSOR_R4300, 3 },
732 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
733 { "r4600", PROCESSOR_R4600, 3 },
734 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
735 { "r4650", PROCESSOR_R4650, 3 },
736
737 /* MIPS IV */
738 { "r8000", PROCESSOR_R8000, 4 },
739 { "vr5000", PROCESSOR_R5000, 4 },
740 { "vr5400", PROCESSOR_R5400, 4 },
741 { "vr5500", PROCESSOR_R5500, 4 },
742 { "rm7000", PROCESSOR_R7000, 4 },
743 { "rm9000", PROCESSOR_R9000, 4 },
744
745 /* MIPS32 */
746 { "4kc", PROCESSOR_4KC, 32 },
747 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
748 { "4kp", PROCESSOR_4KP, 32 },
749
750 /* MIPS32 Release 2 */
751 { "m4k", PROCESSOR_M4K, 33 },
752 { "24k", PROCESSOR_24K, 33 },
753 { "24kc", PROCESSOR_24K, 33 }, /* 24K no FPU */
754 { "24kf", PROCESSOR_24K, 33 }, /* 24K 1:2 FPU */
755 { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
756
757 /* MIPS64 */
758 { "5kc", PROCESSOR_5KC, 64 },
759 { "5kf", PROCESSOR_5KF, 64 },
760 { "20kc", PROCESSOR_20KC, 64 },
761 { "sb1", PROCESSOR_SB1, 64 },
762 { "sr71000", PROCESSOR_SR71000, 64 },
763
764 /* End marker */
765 { 0, 0, 0 }
766 };
767
768 /* Default costs. If these are used for a processor we should look
769 up the actual costs. */
770 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
771 COSTS_N_INSNS (7), /* fp_mult_sf */ \
772 COSTS_N_INSNS (8), /* fp_mult_df */ \
773 COSTS_N_INSNS (23), /* fp_div_sf */ \
774 COSTS_N_INSNS (36), /* fp_div_df */ \
775 COSTS_N_INSNS (10), /* int_mult_si */ \
776 COSTS_N_INSNS (10), /* int_mult_di */ \
777 COSTS_N_INSNS (69), /* int_div_si */ \
778 COSTS_N_INSNS (69), /* int_div_di */ \
779 2, /* branch_cost */ \
780 4 /* memory_latency */
781
782 /* Need to replace these with the costs of calling the appropriate
783 libgcc routine. */
784 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
785 COSTS_N_INSNS (256), /* fp_mult_sf */ \
786 COSTS_N_INSNS (256), /* fp_mult_df */ \
787 COSTS_N_INSNS (256), /* fp_div_sf */ \
788 COSTS_N_INSNS (256) /* fp_div_df */
789
790 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
791 {
792 { /* R3000 */
793 COSTS_N_INSNS (2), /* fp_add */
794 COSTS_N_INSNS (4), /* fp_mult_sf */
795 COSTS_N_INSNS (5), /* fp_mult_df */
796 COSTS_N_INSNS (12), /* fp_div_sf */
797 COSTS_N_INSNS (19), /* fp_div_df */
798 COSTS_N_INSNS (12), /* int_mult_si */
799 COSTS_N_INSNS (12), /* int_mult_di */
800 COSTS_N_INSNS (35), /* int_div_si */
801 COSTS_N_INSNS (35), /* int_div_di */
802 1, /* branch_cost */
803 4 /* memory_latency */
804
805 },
806 { /* 4KC */
807 SOFT_FP_COSTS,
808 COSTS_N_INSNS (6), /* int_mult_si */
809 COSTS_N_INSNS (6), /* int_mult_di */
810 COSTS_N_INSNS (36), /* int_div_si */
811 COSTS_N_INSNS (36), /* int_div_di */
812 1, /* branch_cost */
813 4 /* memory_latency */
814 },
815 { /* 4KP */
816 SOFT_FP_COSTS,
817 COSTS_N_INSNS (36), /* int_mult_si */
818 COSTS_N_INSNS (36), /* int_mult_di */
819 COSTS_N_INSNS (37), /* int_div_si */
820 COSTS_N_INSNS (37), /* int_div_di */
821 1, /* branch_cost */
822 4 /* memory_latency */
823 },
824 { /* 5KC */
825 SOFT_FP_COSTS,
826 COSTS_N_INSNS (4), /* int_mult_si */
827 COSTS_N_INSNS (11), /* int_mult_di */
828 COSTS_N_INSNS (36), /* int_div_si */
829 COSTS_N_INSNS (68), /* int_div_di */
830 1, /* branch_cost */
831 4 /* memory_latency */
832 },
833 { /* 5KF */
834 COSTS_N_INSNS (4), /* fp_add */
835 COSTS_N_INSNS (4), /* fp_mult_sf */
836 COSTS_N_INSNS (5), /* fp_mult_df */
837 COSTS_N_INSNS (17), /* fp_div_sf */
838 COSTS_N_INSNS (32), /* fp_div_df */
839 COSTS_N_INSNS (4), /* int_mult_si */
840 COSTS_N_INSNS (11), /* int_mult_di */
841 COSTS_N_INSNS (36), /* int_div_si */
842 COSTS_N_INSNS (68), /* int_div_di */
843 1, /* branch_cost */
844 4 /* memory_latency */
845 },
846 { /* 20KC */
847 DEFAULT_COSTS
848 },
849 { /* 24k */
850 COSTS_N_INSNS (8), /* fp_add */
851 COSTS_N_INSNS (8), /* fp_mult_sf */
852 COSTS_N_INSNS (10), /* fp_mult_df */
853 COSTS_N_INSNS (34), /* fp_div_sf */
854 COSTS_N_INSNS (64), /* fp_div_df */
855 COSTS_N_INSNS (5), /* int_mult_si */
856 COSTS_N_INSNS (5), /* int_mult_di */
857 COSTS_N_INSNS (41), /* int_div_si */
858 COSTS_N_INSNS (41), /* int_div_di */
859 1, /* branch_cost */
860 4 /* memory_latency */
861 },
862 { /* 24kx */
863 COSTS_N_INSNS (4), /* fp_add */
864 COSTS_N_INSNS (4), /* fp_mult_sf */
865 COSTS_N_INSNS (5), /* fp_mult_df */
866 COSTS_N_INSNS (17), /* fp_div_sf */
867 COSTS_N_INSNS (32), /* fp_div_df */
868 COSTS_N_INSNS (5), /* int_mult_si */
869 COSTS_N_INSNS (5), /* int_mult_di */
870 COSTS_N_INSNS (41), /* int_div_si */
871 COSTS_N_INSNS (41), /* int_div_di */
872 1, /* branch_cost */
873 4 /* memory_latency */
874 },
875 { /* M4k */
876 DEFAULT_COSTS
877 },
878 { /* R3900 */
879 COSTS_N_INSNS (2), /* fp_add */
880 COSTS_N_INSNS (4), /* fp_mult_sf */
881 COSTS_N_INSNS (5), /* fp_mult_df */
882 COSTS_N_INSNS (12), /* fp_div_sf */
883 COSTS_N_INSNS (19), /* fp_div_df */
884 COSTS_N_INSNS (2), /* int_mult_si */
885 COSTS_N_INSNS (2), /* int_mult_di */
886 COSTS_N_INSNS (35), /* int_div_si */
887 COSTS_N_INSNS (35), /* int_div_di */
888 1, /* branch_cost */
889 4 /* memory_latency */
890 },
891 { /* R6000 */
892 COSTS_N_INSNS (3), /* fp_add */
893 COSTS_N_INSNS (5), /* fp_mult_sf */
894 COSTS_N_INSNS (6), /* fp_mult_df */
895 COSTS_N_INSNS (15), /* fp_div_sf */
896 COSTS_N_INSNS (16), /* fp_div_df */
897 COSTS_N_INSNS (17), /* int_mult_si */
898 COSTS_N_INSNS (17), /* int_mult_di */
899 COSTS_N_INSNS (38), /* int_div_si */
900 COSTS_N_INSNS (38), /* int_div_di */
901 2, /* branch_cost */
902 6 /* memory_latency */
903 },
904 { /* R4000 */
905 COSTS_N_INSNS (6), /* fp_add */
906 COSTS_N_INSNS (7), /* fp_mult_sf */
907 COSTS_N_INSNS (8), /* fp_mult_df */
908 COSTS_N_INSNS (23), /* fp_div_sf */
909 COSTS_N_INSNS (36), /* fp_div_df */
910 COSTS_N_INSNS (10), /* int_mult_si */
911 COSTS_N_INSNS (10), /* int_mult_di */
912 COSTS_N_INSNS (69), /* int_div_si */
913 COSTS_N_INSNS (69), /* int_div_di */
914 2, /* branch_cost */
915 6 /* memory_latency */
916 },
917 { /* R4100 */
918 DEFAULT_COSTS
919 },
920 { /* R4111 */
921 DEFAULT_COSTS
922 },
923 { /* R4120 */
924 DEFAULT_COSTS
925 },
926 { /* R4130 */
927 /* The only costs that appear to be updated here are those for
928 integer multiplication. */
929 SOFT_FP_COSTS,
930 COSTS_N_INSNS (4), /* int_mult_si */
931 COSTS_N_INSNS (6), /* int_mult_di */
932 COSTS_N_INSNS (69), /* int_div_si */
933 COSTS_N_INSNS (69), /* int_div_di */
934 1, /* branch_cost */
935 4 /* memory_latency */
936 },
937 { /* R4300 */
938 DEFAULT_COSTS
939 },
940 { /* R4600 */
941 DEFAULT_COSTS
942 },
943 { /* R4650 */
944 DEFAULT_COSTS
945 },
946 { /* R5000 */
947 COSTS_N_INSNS (6), /* fp_add */
948 COSTS_N_INSNS (4), /* fp_mult_sf */
949 COSTS_N_INSNS (5), /* fp_mult_df */
950 COSTS_N_INSNS (23), /* fp_div_sf */
951 COSTS_N_INSNS (36), /* fp_div_df */
952 COSTS_N_INSNS (5), /* int_mult_si */
953 COSTS_N_INSNS (5), /* int_mult_di */
954 COSTS_N_INSNS (36), /* int_div_si */
955 COSTS_N_INSNS (36), /* int_div_di */
956 1, /* branch_cost */
957 4 /* memory_latency */
958 },
959 { /* R5400 */
960 COSTS_N_INSNS (6), /* fp_add */
961 COSTS_N_INSNS (5), /* fp_mult_sf */
962 COSTS_N_INSNS (6), /* fp_mult_df */
963 COSTS_N_INSNS (30), /* fp_div_sf */
964 COSTS_N_INSNS (59), /* fp_div_df */
965 COSTS_N_INSNS (3), /* int_mult_si */
966 COSTS_N_INSNS (4), /* int_mult_di */
967 COSTS_N_INSNS (42), /* int_div_si */
968 COSTS_N_INSNS (74), /* int_div_di */
969 1, /* branch_cost */
970 4 /* memory_latency */
971 },
972 { /* R5500 */
973 COSTS_N_INSNS (6), /* fp_add */
974 COSTS_N_INSNS (5), /* fp_mult_sf */
975 COSTS_N_INSNS (6), /* fp_mult_df */
976 COSTS_N_INSNS (30), /* fp_div_sf */
977 COSTS_N_INSNS (59), /* fp_div_df */
978 COSTS_N_INSNS (5), /* int_mult_si */
979 COSTS_N_INSNS (9), /* int_mult_di */
980 COSTS_N_INSNS (42), /* int_div_si */
981 COSTS_N_INSNS (74), /* int_div_di */
982 1, /* branch_cost */
983 4 /* memory_latency */
984 },
985 { /* R7000 */
986 /* The only costs that are changed here are those for
987 integer multiplication. */
988 COSTS_N_INSNS (6), /* fp_add */
989 COSTS_N_INSNS (7), /* fp_mult_sf */
990 COSTS_N_INSNS (8), /* fp_mult_df */
991 COSTS_N_INSNS (23), /* fp_div_sf */
992 COSTS_N_INSNS (36), /* fp_div_df */
993 COSTS_N_INSNS (5), /* int_mult_si */
994 COSTS_N_INSNS (9), /* int_mult_di */
995 COSTS_N_INSNS (69), /* int_div_si */
996 COSTS_N_INSNS (69), /* int_div_di */
997 1, /* branch_cost */
998 4 /* memory_latency */
999 },
1000 { /* R8000 */
1001 DEFAULT_COSTS
1002 },
1003 { /* R9000 */
1004 /* The only costs that are changed here are those for
1005 integer multiplication. */
1006 COSTS_N_INSNS (6), /* fp_add */
1007 COSTS_N_INSNS (7), /* fp_mult_sf */
1008 COSTS_N_INSNS (8), /* fp_mult_df */
1009 COSTS_N_INSNS (23), /* fp_div_sf */
1010 COSTS_N_INSNS (36), /* fp_div_df */
1011 COSTS_N_INSNS (3), /* int_mult_si */
1012 COSTS_N_INSNS (8), /* int_mult_di */
1013 COSTS_N_INSNS (69), /* int_div_si */
1014 COSTS_N_INSNS (69), /* int_div_di */
1015 1, /* branch_cost */
1016 4 /* memory_latency */
1017 },
1018 { /* SB1 */
1019 COSTS_N_INSNS (4), /* fp_add */
1020 COSTS_N_INSNS (4), /* fp_mult_sf */
1021 COSTS_N_INSNS (4), /* fp_mult_df */
1022 COSTS_N_INSNS (24), /* fp_div_sf */
1023 COSTS_N_INSNS (32), /* fp_div_df */
1024 COSTS_N_INSNS (3), /* int_mult_si */
1025 COSTS_N_INSNS (4), /* int_mult_di */
1026 COSTS_N_INSNS (36), /* int_div_si */
1027 COSTS_N_INSNS (68), /* int_div_di */
1028 1, /* branch_cost */
1029 4 /* memory_latency */
1030 },
1031 { /* SR71000 */
1032 DEFAULT_COSTS
1033 },
1034 };
1035
1036 \f
1037 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1038 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1039 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1040 #endif
1041 \f
1042 /* Initialize the GCC target structure. */
1043 #undef TARGET_ASM_ALIGNED_HI_OP
1044 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1045 #undef TARGET_ASM_ALIGNED_SI_OP
1046 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1047 #undef TARGET_ASM_ALIGNED_DI_OP
1048 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1049
1050 #undef TARGET_ASM_FUNCTION_PROLOGUE
1051 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1052 #undef TARGET_ASM_FUNCTION_EPILOGUE
1053 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1054 #undef TARGET_ASM_SELECT_RTX_SECTION
1055 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1056 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1057 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1058
1059 #undef TARGET_SCHED_REORDER
1060 #define TARGET_SCHED_REORDER mips_sched_reorder
1061 #undef TARGET_SCHED_VARIABLE_ISSUE
1062 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1063 #undef TARGET_SCHED_ADJUST_COST
1064 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1065 #undef TARGET_SCHED_ISSUE_RATE
1066 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1067 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1068 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1069 mips_multipass_dfa_lookahead
1070
1071 #undef TARGET_DEFAULT_TARGET_FLAGS
1072 #define TARGET_DEFAULT_TARGET_FLAGS \
1073 (TARGET_DEFAULT \
1074 | TARGET_CPU_DEFAULT \
1075 | TARGET_ENDIAN_DEFAULT \
1076 | TARGET_FP_EXCEPTIONS_DEFAULT \
1077 | MASK_CHECK_ZERO_DIV \
1078 | MASK_FUSED_MADD)
1079 #undef TARGET_HANDLE_OPTION
1080 #define TARGET_HANDLE_OPTION mips_handle_option
1081
1082 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1083 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1084
1085 #undef TARGET_VALID_POINTER_MODE
1086 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1087 #undef TARGET_RTX_COSTS
1088 #define TARGET_RTX_COSTS mips_rtx_costs
1089 #undef TARGET_ADDRESS_COST
1090 #define TARGET_ADDRESS_COST mips_address_cost
1091
1092 #undef TARGET_IN_SMALL_DATA_P
1093 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1094
1095 #undef TARGET_MACHINE_DEPENDENT_REORG
1096 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1097
1098 #undef TARGET_ASM_FILE_START
1099 #undef TARGET_ASM_FILE_END
1100 #define TARGET_ASM_FILE_START mips_file_start
1101 #define TARGET_ASM_FILE_END mips_file_end
1102 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1103 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1104
1105 #undef TARGET_INIT_LIBFUNCS
1106 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1107
1108 #undef TARGET_BUILD_BUILTIN_VA_LIST
1109 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1110 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1111 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1112
1113 #undef TARGET_PROMOTE_FUNCTION_ARGS
1114 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1115 #undef TARGET_PROMOTE_FUNCTION_RETURN
1116 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1117 #undef TARGET_PROMOTE_PROTOTYPES
1118 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1119
1120 #undef TARGET_RETURN_IN_MEMORY
1121 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1122 #undef TARGET_RETURN_IN_MSB
1123 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1124
1125 #undef TARGET_ASM_OUTPUT_MI_THUNK
1126 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1127 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1128 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1129
1130 #undef TARGET_SETUP_INCOMING_VARARGS
1131 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1132 #undef TARGET_STRICT_ARGUMENT_NAMING
1133 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1134 #undef TARGET_MUST_PASS_IN_STACK
1135 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1136 #undef TARGET_PASS_BY_REFERENCE
1137 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1138 #undef TARGET_CALLEE_COPIES
1139 #define TARGET_CALLEE_COPIES mips_callee_copies
1140 #undef TARGET_ARG_PARTIAL_BYTES
1141 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1142
1143 #undef TARGET_MODE_REP_EXTENDED
1144 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1145
1146 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1147 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1148
1149 #undef TARGET_INIT_BUILTINS
1150 #define TARGET_INIT_BUILTINS mips_init_builtins
1151 #undef TARGET_EXPAND_BUILTIN
1152 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1153
1154 #undef TARGET_HAVE_TLS
1155 #define TARGET_HAVE_TLS HAVE_AS_TLS
1156
1157 #undef TARGET_CANNOT_FORCE_CONST_MEM
1158 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1159
1160 #undef TARGET_ENCODE_SECTION_INFO
1161 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1162
1163 #undef TARGET_ATTRIBUTE_TABLE
1164 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1165
1166 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1167 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1168
1169 #undef TARGET_MIN_ANCHOR_OFFSET
1170 #define TARGET_MIN_ANCHOR_OFFSET -32768
1171 #undef TARGET_MAX_ANCHOR_OFFSET
1172 #define TARGET_MAX_ANCHOR_OFFSET 32767
1173 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1174 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1175 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1176 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1177
1178 struct gcc_target targetm = TARGET_INITIALIZER;
1179 \f
1180 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1181
1182 static enum mips_symbol_type
1183 mips_classify_symbol (rtx x)
1184 {
1185 if (GET_CODE (x) == LABEL_REF)
1186 {
1187 if (TARGET_MIPS16)
1188 return SYMBOL_CONSTANT_POOL;
1189 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1190 return SYMBOL_GOT_LOCAL;
1191 return SYMBOL_GENERAL;
1192 }
1193
1194 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1195
1196 if (SYMBOL_REF_TLS_MODEL (x))
1197 return SYMBOL_TLS;
1198
1199 if (CONSTANT_POOL_ADDRESS_P (x))
1200 {
1201 if (TARGET_MIPS16)
1202 return SYMBOL_CONSTANT_POOL;
1203
1204 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1205 return SYMBOL_SMALL_DATA;
1206 }
1207
1208 if (SYMBOL_REF_SMALL_P (x))
1209 return SYMBOL_SMALL_DATA;
1210
1211 if (TARGET_ABICALLS)
1212 {
1213 if (SYMBOL_REF_DECL (x) == 0)
1214 {
1215 if (!SYMBOL_REF_LOCAL_P (x))
1216 return SYMBOL_GOT_GLOBAL;
1217 }
1218 else
1219 {
1220 /* Don't use GOT accesses for locally-binding symbols if
1221 TARGET_ABSOLUTE_ABICALLS. Otherwise, there are three
1222 cases to consider:
1223
1224 - o32 PIC (either with or without explicit relocs)
1225 - n32/n64 PIC without explicit relocs
1226 - n32/n64 PIC with explicit relocs
1227
1228 In the first case, both local and global accesses will use an
1229 R_MIPS_GOT16 relocation. We must correctly predict which of
1230 the two semantics (local or global) the assembler and linker
1231 will apply. The choice doesn't depend on the symbol's
1232 visibility, so we deliberately ignore decl_visibility and
1233 binds_local_p here.
1234
1235 In the second case, the assembler will not use R_MIPS_GOT16
1236 relocations, but it chooses between local and global accesses
1237 in the same way as for o32 PIC.
1238
1239 In the third case we have more freedom since both forms of
1240 access will work for any kind of symbol. However, there seems
1241 little point in doing things differently. */
1242 if (DECL_P (SYMBOL_REF_DECL (x))
1243 && TREE_PUBLIC (SYMBOL_REF_DECL (x))
1244 && !(TARGET_ABSOLUTE_ABICALLS
1245 && targetm.binds_local_p (SYMBOL_REF_DECL (x))))
1246 return SYMBOL_GOT_GLOBAL;
1247 }
1248
1249 if (!TARGET_ABSOLUTE_ABICALLS)
1250 return SYMBOL_GOT_LOCAL;
1251 }
1252
1253 return SYMBOL_GENERAL;
1254 }
1255
1256
1257 /* Split X into a base and a constant offset, storing them in *BASE
1258 and *OFFSET respectively. */
1259
1260 static void
1261 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
1262 {
1263 *offset = 0;
1264
1265 if (GET_CODE (x) == CONST)
1266 x = XEXP (x, 0);
1267
1268 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1269 {
1270 *offset += INTVAL (XEXP (x, 1));
1271 x = XEXP (x, 0);
1272 }
1273 *base = x;
1274 }
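/* For example (operands are hypothetical): given
   (const (plus (symbol_ref "foo") (const_int 12))), the function above
   sets *BASE to (symbol_ref "foo") and *OFFSET to 12; for a bare
   (symbol_ref "foo"), *BASE is the SYMBOL_REF itself and *OFFSET is 0. */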
1275
1276
1277 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
1278 to the same object as SYMBOL, or to the same object_block. */
1279
1280 static bool
1281 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
1282 {
1283 if (GET_CODE (symbol) != SYMBOL_REF)
1284 return false;
1285
1286 if (CONSTANT_POOL_ADDRESS_P (symbol)
1287 && offset >= 0
1288 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
1289 return true;
1290
1291 if (SYMBOL_REF_DECL (symbol) != 0
1292 && offset >= 0
1293 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
1294 return true;
1295
1296 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
1297 && SYMBOL_REF_BLOCK (symbol)
1298 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
1299 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
1300 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
1301 return true;
1302
1303 return false;
1304 }
1305
1306
1307 /* Return true if X is a symbolic constant that can be calculated in
1308 the same way as a bare symbol. If it is, store the type of the
1309 symbol in *SYMBOL_TYPE. */
1310
1311 bool
1312 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1313 {
1314 HOST_WIDE_INT offset;
1315
1316 mips_split_const (x, &x, &offset);
1317 if (UNSPEC_ADDRESS_P (x))
1318 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1319 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1320 {
1321 *symbol_type = mips_classify_symbol (x);
1322 if (*symbol_type == SYMBOL_TLS)
1323 return false;
1324 }
1325 else
1326 return false;
1327
1328 if (offset == 0)
1329 return true;
1330
1331 /* Check whether a nonzero offset is valid for the underlying
1332 relocations. */
1333 switch (*symbol_type)
1334 {
1335 case SYMBOL_GENERAL:
1336 case SYMBOL_64_HIGH:
1337 case SYMBOL_64_MID:
1338 case SYMBOL_64_LOW:
1339 /* If the target has 64-bit pointers and the object file only
1340 supports 32-bit symbols, the values of those symbols will be
1341 sign-extended. In this case we can't allow an arbitrary offset
1342 in case the 32-bit value X + OFFSET has a different sign from X. */
1343 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1344 return mips_offset_within_object_p (x, offset);
1345
1346 /* In other cases the relocations can handle any offset. */
1347 return true;
1348
1349 case SYMBOL_CONSTANT_POOL:
1350 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1351 In this case, we no longer have access to the underlying constant,
1352 but the original symbol-based access was known to be valid. */
1353 if (GET_CODE (x) == LABEL_REF)
1354 return true;
1355
1356 /* Fall through. */
1357
1358 case SYMBOL_SMALL_DATA:
1359 /* Make sure that the offset refers to something within the
1360 underlying object. This should guarantee that the final
1361 PC- or GP-relative offset is within the 16-bit limit. */
1362 return mips_offset_within_object_p (x, offset);
1363
1364 case SYMBOL_GOT_LOCAL:
1365 case SYMBOL_GOTOFF_PAGE:
1366 /* The linker should provide enough local GOT entries for a
1367 16-bit offset. Larger offsets may lead to GOT overflow. */
1368 return SMALL_OPERAND (offset);
1369
1370 case SYMBOL_GOT_GLOBAL:
1371 case SYMBOL_GOTOFF_GLOBAL:
1372 case SYMBOL_GOTOFF_CALL:
1373 case SYMBOL_GOTOFF_LOADGP:
1374 case SYMBOL_TLSGD:
1375 case SYMBOL_TLSLDM:
1376 case SYMBOL_DTPREL:
1377 case SYMBOL_TPREL:
1378 case SYMBOL_GOTTPREL:
1379 case SYMBOL_TLS:
1380 return false;
1381 }
1382 gcc_unreachable ();
1383 }
1384
1385
1386 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1387
1388 int
1389 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1390 {
1391 if (regno >= FIRST_PSEUDO_REGISTER)
1392 {
1393 if (!strict)
1394 return true;
1395 regno = reg_renumber[regno];
1396 }
1397
1398 /* These fake registers will be eliminated to either the stack or
1399 hard frame pointer, both of which are usually valid base registers.
1400 Reload deals with the cases where the eliminated form isn't valid. */
1401 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1402 return true;
1403
1404 /* In mips16 mode, the stack pointer can only address word and doubleword
1405 values, nothing smaller. There are two problems here:
1406
1407 (a) Instantiating virtual registers can introduce new uses of the
1408 stack pointer. If these virtual registers are valid addresses,
1409 the stack pointer should be too.
1410
1411 (b) Most uses of the stack pointer are not made explicit until
1412 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1413 We don't know until that stage whether we'll be eliminating to the
1414 stack pointer (which needs the restriction) or the hard frame
1415 pointer (which doesn't).
1416
1417 All in all, it seems more consistent to only enforce this restriction
1418 during and after reload. */
1419 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1420 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1421
1422 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1423 }
1424
1425
1426 /* Return true if X is a valid base register for the given mode.
1427 Allow only hard registers if STRICT. */
1428
1429 static bool
1430 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1431 {
1432 if (!strict && GET_CODE (x) == SUBREG)
1433 x = SUBREG_REG (x);
1434
1435 return (REG_P (x)
1436 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1437 }
1438
1439
1440 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1441 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1442
1443 static bool
1444 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1445 enum machine_mode mode)
1446 {
1447 switch (symbol_type)
1448 {
1449 case SYMBOL_GENERAL:
1450 return !TARGET_MIPS16;
1451
1452 case SYMBOL_SMALL_DATA:
1453 return true;
1454
1455 case SYMBOL_CONSTANT_POOL:
1456 /* PC-relative addressing is only available for lw and ld. */
1457 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1458
1459 case SYMBOL_GOT_LOCAL:
1460 return true;
1461
1462 case SYMBOL_GOT_GLOBAL:
1463 /* The address will have to be loaded from the GOT first. */
1464 return false;
1465
1466 case SYMBOL_TLSGD:
1467 case SYMBOL_TLSLDM:
1468 case SYMBOL_DTPREL:
1469 case SYMBOL_TPREL:
1470 case SYMBOL_GOTTPREL:
1471 case SYMBOL_TLS:
1472 return false;
1473
1474 case SYMBOL_GOTOFF_PAGE:
1475 case SYMBOL_GOTOFF_GLOBAL:
1476 case SYMBOL_GOTOFF_CALL:
1477 case SYMBOL_GOTOFF_LOADGP:
1478 case SYMBOL_64_HIGH:
1479 case SYMBOL_64_MID:
1480 case SYMBOL_64_LOW:
1481 return true;
1482 }
1483 gcc_unreachable ();
1484 }
1485
1486
1487 /* Return true if X is a valid address for machine mode MODE. If it is,
1488 fill in INFO appropriately. STRICT is true if we should only accept
1489 hard base registers. */
1490
1491 static bool
1492 mips_classify_address (struct mips_address_info *info, rtx x,
1493 enum machine_mode mode, int strict)
1494 {
1495 switch (GET_CODE (x))
1496 {
1497 case REG:
1498 case SUBREG:
1499 info->type = ADDRESS_REG;
1500 info->reg = x;
1501 info->offset = const0_rtx;
1502 return mips_valid_base_register_p (info->reg, mode, strict);
1503
1504 case PLUS:
1505 info->type = ADDRESS_REG;
1506 info->reg = XEXP (x, 0);
1507 info->offset = XEXP (x, 1);
1508 return (mips_valid_base_register_p (info->reg, mode, strict)
1509 && const_arith_operand (info->offset, VOIDmode));
1510
1511 case LO_SUM:
1512 info->type = ADDRESS_LO_SUM;
1513 info->reg = XEXP (x, 0);
1514 info->offset = XEXP (x, 1);
1515 return (mips_valid_base_register_p (info->reg, mode, strict)
1516 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1517 && mips_symbolic_address_p (info->symbol_type, mode)
1518 && mips_lo_relocs[info->symbol_type] != 0);
1519
1520 case CONST_INT:
1521 /* Small-integer addresses don't occur very often, but they
1522 are legitimate if $0 is a valid base register. */
1523 info->type = ADDRESS_CONST_INT;
1524 return !TARGET_MIPS16 && SMALL_INT (x);
1525
1526 case CONST:
1527 case LABEL_REF:
1528 case SYMBOL_REF:
1529 info->type = ADDRESS_SYMBOLIC;
1530 return (mips_symbolic_constant_p (x, &info->symbol_type)
1531 && mips_symbolic_address_p (info->symbol_type, mode)
1532 && !mips_split_p[info->symbol_type]);
1533
1534 default:
1535 return false;
1536 }
1537 }
1538
1539 /* Return true if X is a thread-local symbol. */
1540
1541 static bool
1542 mips_tls_operand_p (rtx x)
1543 {
1544 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1545 }
1546
1547 /* A for_each_rtx callback: return nonzero if *X is a thread-local symbol. */
1548
1549 static int
1550 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1551 {
1552 return mips_tls_operand_p (*x);
1553 }
1554
1555 /* Return true if X cannot be forced into a constant pool. */
1556
1557 static bool
1558 mips_cannot_force_const_mem (rtx x)
1559 {
1560 rtx base;
1561 HOST_WIDE_INT offset;
1562
1563 if (!TARGET_MIPS16)
1564 {
1565 /* As an optimization, reject constants that mips_legitimize_move
1566 can expand inline.
1567
1568 Suppose we have a multi-instruction sequence that loads constant C
1569 into register R. If R does not get allocated a hard register, and
1570 R is used in an operand that allows both registers and memory
1571 references, reload will consider forcing C into memory and using
1572 one of the instruction's memory alternatives. Returning false
1573 here will force it to use an input reload instead. */
1574 if (GET_CODE (x) == CONST_INT)
1575 return true;
1576
1577 mips_split_const (x, &base, &offset);
1578 if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
1579 return true;
1580 }
1581
1582 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1583 return true;
1584
1585 return false;
1586 }
1587
1588 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1589 constant pools, but normal-mode code doesn't need to. */
1590
1591 static bool
1592 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1593 rtx x ATTRIBUTE_UNUSED)
1594 {
1595 return !TARGET_MIPS16;
1596 }
1597 \f
1598 /* Return the number of instructions needed to load a symbol of the
1599 given type into a register. If valid in an address, the same number
1600 of instructions are needed for loads and stores. Treat extended
1601 mips16 instructions as two instructions. */
1602
1603 static int
1604 mips_symbol_insns (enum mips_symbol_type type)
1605 {
1606 switch (type)
1607 {
1608 case SYMBOL_GENERAL:
1609 /* In mips16 code, general symbols must be fetched from the
1610 constant pool. */
1611 if (TARGET_MIPS16)
1612 return 0;
1613
1614 /* When using 64-bit symbols, we need 5 preparatory instructions,
1615 such as:
1616
1617 lui $at,%highest(symbol)
1618 daddiu $at,$at,%higher(symbol)
1619 dsll $at,$at,16
1620 daddiu $at,$at,%hi(symbol)
1621 dsll $at,$at,16
1622
1623 The final address is then $at + %lo(symbol). With 32-bit
1624 symbols we just need a preparatory lui. */
1625 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1626
1627 case SYMBOL_SMALL_DATA:
1628 return 1;
1629
1630 case SYMBOL_CONSTANT_POOL:
1631 /* This case is for mips16 only. Assume we'll need an
1632 extended instruction. */
1633 return 2;
1634
1635 case SYMBOL_GOT_LOCAL:
1636 case SYMBOL_GOT_GLOBAL:
1637 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1638 the local/global classification is accurate. See override_options
1639 for details.
1640
1641 The worst cases are:
1642
1643 (1) For local symbols when generating o32 or o64 code. The assembler
1644 will use:
1645
1646 lw $at,%got(symbol)
1647 nop
1648
1649 ...and the final address will be $at + %lo(symbol).
1650
1651 (2) For global symbols when -mxgot. The assembler will use:
1652
1653 lui $at,%got_hi(symbol)
1654 (d)addu $at,$at,$gp
1655
1656 ...and the final address will be $at + %got_lo(symbol). */
1657 return 3;
1658
1659 case SYMBOL_GOTOFF_PAGE:
1660 case SYMBOL_GOTOFF_GLOBAL:
1661 case SYMBOL_GOTOFF_CALL:
1662 case SYMBOL_GOTOFF_LOADGP:
1663 case SYMBOL_64_HIGH:
1664 case SYMBOL_64_MID:
1665 case SYMBOL_64_LOW:
1666 case SYMBOL_TLSGD:
1667 case SYMBOL_TLSLDM:
1668 case SYMBOL_DTPREL:
1669 case SYMBOL_GOTTPREL:
1670 case SYMBOL_TPREL:
1671 /* Check whether the offset is a 16- or 32-bit value. */
1672 return mips_split_p[type] ? 2 : 1;
1673
1674 case SYMBOL_TLS:
1675 /* We don't treat a bare TLS symbol as a constant. */
1676 return 0;
1677 }
1678 gcc_unreachable ();
1679 }
1680
1681 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1682
1683 bool
1684 mips_stack_address_p (rtx x, enum machine_mode mode)
1685 {
1686 struct mips_address_info addr;
1687
1688 return (mips_classify_address (&addr, x, mode, false)
1689 && addr.type == ADDRESS_REG
1690 && addr.reg == stack_pointer_rtx);
1691 }
1692
1693 /* Return true if a value at OFFSET bytes from BASE can be accessed
1694 using an unextended mips16 instruction. MODE is the mode of the
1695 value.
1696
1697 Usually the offset in an unextended instruction is a 5-bit field.
1698 The offset is unsigned and shifted left once for HIs, twice
1699 for SIs, and so on. An exception is SImode accesses off the
1700 stack pointer, which have an 8-bit immediate field. */
1701
1702 static bool
1703 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1704 {
1705 if (TARGET_MIPS16
1706 && GET_CODE (offset) == CONST_INT
1707 && INTVAL (offset) >= 0
1708 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1709 {
1710 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1711 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1712 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1713 }
1714 return false;
1715 }
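/* For example, following the 32 * size and 256 * size limits above, an
   SImode mips16 access is unextended for offsets 0, 4, ..., 124 from an
   ordinary base register, or 0, 4, ..., 1020 from $sp, and an HImode
   access is unextended for offsets 0, 2, ..., 62.  */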
1716
1717
1718 /* Return the number of instructions needed to load or store a value
1719 of mode MODE at X. Return 0 if X isn't valid for MODE.
1720
1721 For mips16 code, count extended instructions as two instructions. */
1722
1723 int
1724 mips_address_insns (rtx x, enum machine_mode mode)
1725 {
1726 struct mips_address_info addr;
1727 int factor;
1728
1729 if (mode == BLKmode)
1730 /* BLKmode is used for single unaligned loads and stores. */
1731 factor = 1;
1732 else
1733 /* Each word of a multi-word value will be accessed individually. */
1734 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1735
1736 if (mips_classify_address (&addr, x, mode, false))
1737 switch (addr.type)
1738 {
1739 case ADDRESS_REG:
1740 if (TARGET_MIPS16
1741 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1742 return factor * 2;
1743 return factor;
1744
1745 case ADDRESS_LO_SUM:
1746 return (TARGET_MIPS16 ? factor * 2 : factor);
1747
1748 case ADDRESS_CONST_INT:
1749 return factor;
1750
1751 case ADDRESS_SYMBOLIC:
1752 return factor * mips_symbol_insns (addr.symbol_type);
1753 }
1754 return 0;
1755 }
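/* Rough example: a DImode access on a 32-bit target has FACTOR == 2,
   so a valid (reg + offset) address costs 2 (one access per word), or
   twice that if mips16 would need extended instructions for it.  */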
1756
1757
1758 /* Likewise for constant X. */
1759
1760 int
1761 mips_const_insns (rtx x)
1762 {
1763 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1764 enum mips_symbol_type symbol_type;
1765 HOST_WIDE_INT offset;
1766
1767 switch (GET_CODE (x))
1768 {
1769 case HIGH:
1770 if (TARGET_MIPS16
1771 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1772 || !mips_split_p[symbol_type])
1773 return 0;
1774
1775 return 1;
1776
1777 case CONST_INT:
1778 if (TARGET_MIPS16)
1779 /* Unsigned 8-bit constants can be loaded using an unextended
1780 LI instruction. Unsigned 16-bit constants can be loaded
1781 using an extended LI. Negative constants must be loaded
1782 using LI and then negated. */
1783 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1784 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1785 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1786 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1787 : 0);
1788
1789 return mips_build_integer (codes, INTVAL (x));
1790
1791 case CONST_DOUBLE:
1792 case CONST_VECTOR:
1793 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1794
1795 case CONST:
1796 if (CONST_GP_P (x))
1797 return 1;
1798
1799 /* See if we can refer to X directly. */
1800 if (mips_symbolic_constant_p (x, &symbol_type))
1801 return mips_symbol_insns (symbol_type);
1802
1803 /* Otherwise try splitting the constant into a base and offset.
1804 16-bit offsets can be added using an extra addiu. Larger offsets
1805 must be calculated separately and then added to the base. */
1806 mips_split_const (x, &x, &offset);
1807 if (offset != 0)
1808 {
1809 int n = mips_const_insns (x);
1810 if (n != 0)
1811 {
1812 if (SMALL_OPERAND (offset))
1813 return n + 1;
1814 else
1815 return n + 1 + mips_build_integer (codes, offset);
1816 }
1817 }
1818 return 0;
1819
1820 case SYMBOL_REF:
1821 case LABEL_REF:
1822 return mips_symbol_insns (mips_classify_symbol (x));
1823
1824 default:
1825 return 0;
1826 }
1827 }
1828
1829
1830 /* Return the number of instructions needed for memory reference X.
1831 Count extended mips16 instructions as two instructions. */
1832
1833 int
1834 mips_fetch_insns (rtx x)
1835 {
1836 gcc_assert (MEM_P (x));
1837 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1838 }
1839
1840
1841 /* Return the number of instructions needed for an integer division. */
1842
1843 int
1844 mips_idiv_insns (void)
1845 {
1846 int count;
1847
1848 count = 1;
1849 if (TARGET_CHECK_ZERO_DIV)
1850 {
1851 if (GENERATE_DIVIDE_TRAPS)
1852 count++;
1853 else
1854 count += 2;
1855 }
1856
1857 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1858 count++;
1859 return count;
1860 }
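/* As a rough illustration: with zero-division checking enabled on a
   target that has conditional traps, the count is 2 (the divide plus a
   trap check); without conditional traps the check needs a branch and
   a break, giving 3; the R4000/R4400 workarounds add one more.  */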
1861 \f
1862 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1863 returns a nonzero value if X is a legitimate address for a memory
1864 operand of the indicated MODE. STRICT is nonzero if this function
1865 is called during reload. */
1866
1867 bool
1868 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1869 {
1870 struct mips_address_info addr;
1871
1872 return mips_classify_address (&addr, x, mode, strict);
1873 }
1874
1875
1876 /* Copy VALUE to a register and return that register. If new pseudos
1877 are allowed, copy it into a new register, otherwise use DEST. */
1878
1879 static rtx
1880 mips_force_temporary (rtx dest, rtx value)
1881 {
1882 if (!no_new_pseudos)
1883 return force_reg (Pmode, value);
1884 else
1885 {
1886 emit_move_insn (copy_rtx (dest), value);
1887 return dest;
1888 }
1889 }
1890
1891
1892 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1893 and is used to load the high part into a register. */
1894
1895 rtx
1896 mips_split_symbol (rtx temp, rtx addr)
1897 {
1898 rtx high;
1899
1900 if (TARGET_MIPS16)
1901 high = mips16_gp_pseudo_reg ();
1902 else
1903 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1904 return gen_rtx_LO_SUM (Pmode, high, addr);
1905 }
1906
1907
1908 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1909 type SYMBOL_TYPE. */
1910
1911 rtx
1912 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1913 {
1914 rtx base;
1915 HOST_WIDE_INT offset;
1916
1917 mips_split_const (address, &base, &offset);
1918 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1919 UNSPEC_ADDRESS_FIRST + symbol_type);
1920 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1921 }
1922
1923
1924 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1925 high part to BASE and return the result. Just return BASE otherwise.
1926 TEMP is available as a temporary register if needed.
1927
1928 The returned expression can be used as the first operand to a LO_SUM. */
1929
1930 static rtx
1931 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1932 enum mips_symbol_type symbol_type)
1933 {
1934 if (mips_split_p[symbol_type])
1935 {
1936 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1937 addr = mips_force_temporary (temp, addr);
1938 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1939 }
1940 return base;
1941 }
1942
1943
1944 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1945 mips_force_temporary; it is only needed when OFFSET is not a
1946 SMALL_OPERAND. */
1947
1948 static rtx
1949 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1950 {
1951 if (!SMALL_OPERAND (offset))
1952 {
1953 rtx high;
1954 if (TARGET_MIPS16)
1955 {
1956 /* Load the full offset into a register so that we can use
1957 an unextended instruction for the address itself. */
1958 high = GEN_INT (offset);
1959 offset = 0;
1960 }
1961 else
1962 {
1963 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1964 high = GEN_INT (CONST_HIGH_PART (offset));
1965 offset = CONST_LOW_PART (offset);
1966 }
1967 high = mips_force_temporary (temp, high);
1968 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1969 }
1970 return plus_constant (reg, offset);
1971 }
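/* Worked example (illustrative): for REG + 0x12345 in non-mips16 code,
   CONST_HIGH_PART is 0x10000 and CONST_LOW_PART is 0x2345, so we emit
   roughly "lui temp,0x1; addu temp,temp,reg" and return (temp + 0x2345).
   In mips16 code the whole 0x12345 is loaded into TEMP instead, so that
   the final address has a zero offset.  */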
1972
1973 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1974 referencing, and TYPE is the symbol type to use (either global
1975 dynamic or local dynamic). V0 is an RTX for the return value
1976 location. The entire insn sequence is returned. */
1977
1978 static GTY(()) rtx mips_tls_symbol;
1979
1980 static rtx
1981 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
1982 {
1983 rtx insn, loc, tga, a0;
1984
1985 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
1986
1987 if (!mips_tls_symbol)
1988 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
1989
1990 loc = mips_unspec_address (sym, type);
1991
1992 start_sequence ();
1993
1994 emit_insn (gen_rtx_SET (Pmode, a0,
1995 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
1996 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
1997 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
1998 CONST_OR_PURE_CALL_P (insn) = 1;
1999 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2000 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2001 insn = get_insns ();
2002
2003 end_sequence ();
2004
2005 return insn;
2006 }
2007
2008 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2009 return value will be a valid address and move_operand (either a REG
2010 or a LO_SUM). */
2011
2012 static rtx
2013 mips_legitimize_tls_address (rtx loc)
2014 {
2015 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2016 enum tls_model model;
2017
2018 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2019 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2020
2021 model = SYMBOL_REF_TLS_MODEL (loc);
2022
2023 switch (model)
2024 {
2025 case TLS_MODEL_GLOBAL_DYNAMIC:
2026 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2027 dest = gen_reg_rtx (Pmode);
2028 emit_libcall_block (insn, dest, v0, loc);
2029 break;
2030
2031 case TLS_MODEL_LOCAL_DYNAMIC:
2032 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2033 tmp1 = gen_reg_rtx (Pmode);
2034
2035 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2036 share the LDM result with other LD model accesses. */
2037 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2038 UNSPEC_TLS_LDM);
2039 emit_libcall_block (insn, tmp1, v0, eqv);
2040
2041 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2042 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2043 mips_unspec_address (loc, SYMBOL_DTPREL));
2044 break;
2045
2046 case TLS_MODEL_INITIAL_EXEC:
2047 tmp1 = gen_reg_rtx (Pmode);
2048 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2049 if (Pmode == DImode)
2050 {
2051 emit_insn (gen_tls_get_tp_di (v1));
2052 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2053 }
2054 else
2055 {
2056 emit_insn (gen_tls_get_tp_si (v1));
2057 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2058 }
2059 dest = gen_reg_rtx (Pmode);
2060 emit_insn (gen_add3_insn (dest, tmp1, v1));
2061 break;
2062
2063 case TLS_MODEL_LOCAL_EXEC:
2064
2065 if (Pmode == DImode)
2066 emit_insn (gen_tls_get_tp_di (v1));
2067 else
2068 emit_insn (gen_tls_get_tp_si (v1));
2069
2070 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2071 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2072 mips_unspec_address (loc, SYMBOL_TPREL));
2073 break;
2074
2075 default:
2076 gcc_unreachable ();
2077 }
2078
2079 return dest;
2080 }
2081
2082 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2083 be legitimized in a way that the generic machinery might not expect,
2084 put the new address in *XLOC and return true. MODE is the mode of
2085 the memory being accessed. */
2086
2087 bool
2088 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2089 {
2090 enum mips_symbol_type symbol_type;
2091
2092 if (mips_tls_operand_p (*xloc))
2093 {
2094 *xloc = mips_legitimize_tls_address (*xloc);
2095 return true;
2096 }
2097
2098 /* See if the address can be split into a high part and a LO_SUM. */
2099 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2100 && mips_symbolic_address_p (symbol_type, mode)
2101 && mips_split_p[symbol_type])
2102 {
2103 *xloc = mips_split_symbol (0, *xloc);
2104 return true;
2105 }
2106
2107 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2108 {
2109 /* Handle REG + CONSTANT using mips_add_offset. */
2110 rtx reg;
2111
2112 reg = XEXP (*xloc, 0);
2113 if (!mips_valid_base_register_p (reg, mode, 0))
2114 reg = copy_to_mode_reg (Pmode, reg);
2115 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2116 return true;
2117 }
2118
2119 return false;
2120 }
2121
2122
2123 /* Subroutine of mips_build_integer (with the same interface).
2124 Assume that the final action in the sequence should be a left shift. */
2125
2126 static unsigned int
2127 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2128 {
2129 unsigned int i, shift;
2130
2131 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2132 since signed numbers are easier to load than unsigned ones. */
2133 shift = 0;
2134 while ((value & 1) == 0)
2135 value /= 2, shift++;
2136
2137 i = mips_build_integer (codes, value);
2138 codes[i].code = ASHIFT;
2139 codes[i].value = shift;
2140 return i + 1;
2141 }
2142
2143
2144 /* As for mips_build_shift, but assume that the final action will be
2145 an IOR or PLUS operation. */
2146
2147 static unsigned int
2148 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2149 {
2150 unsigned HOST_WIDE_INT high;
2151 unsigned int i;
2152
2153 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2154 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2155 {
2156 /* The constant is too complex to load with a simple lui/ori pair
2157 so our goal is to clear as many trailing bits as possible.
2158 In this case, we know bit 16 is set and that the low 16 bits
2159 form a negative number. If we subtract that number from VALUE,
2160 we will clear at least the lowest 17 bits, maybe more. */
2161 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2162 codes[i].code = PLUS;
2163 codes[i].value = CONST_LOW_PART (value);
2164 }
2165 else
2166 {
2167 i = mips_build_integer (codes, high);
2168 codes[i].code = IOR;
2169 codes[i].value = value & 0xffff;
2170 }
2171 return i + 1;
2172 }
2173
2174
2175 /* Fill CODES with a sequence of rtl operations to load VALUE.
2176 Return the number of operations needed. */
2177
2178 static unsigned int
2179 mips_build_integer (struct mips_integer_op *codes,
2180 unsigned HOST_WIDE_INT value)
2181 {
2182 if (SMALL_OPERAND (value)
2183 || SMALL_OPERAND_UNSIGNED (value)
2184 || LUI_OPERAND (value))
2185 {
2186 /* The value can be loaded with a single instruction. */
2187 codes[0].code = UNKNOWN;
2188 codes[0].value = value;
2189 return 1;
2190 }
2191 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2192 {
2193 /* Either the constant is a simple LUI/ORI combination or its
2194 lowest bit is set. We don't want to shift in this case. */
2195 return mips_build_lower (codes, value);
2196 }
2197 else if ((value & 0xffff) == 0)
2198 {
2199 /* The constant will need at least three actions. The lowest
2200 16 bits are clear, so the final action will be a shift. */
2201 return mips_build_shift (codes, value);
2202 }
2203 else
2204 {
2205 /* The final action could be a shift, add or inclusive OR.
2206 Rather than use a complex condition to select the best
2207 approach, try both mips_build_shift and mips_build_lower
2208 and pick the one that gives the shortest sequence.
2209 Note that this case is only used once per constant. */
2210 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2211 unsigned int cost, alt_cost;
2212
2213 cost = mips_build_shift (codes, value);
2214 alt_cost = mips_build_lower (alt_codes, value);
2215 if (alt_cost < cost)
2216 {
2217 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2218 cost = alt_cost;
2219 }
2220 return cost;
2221 }
2222 }
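/* Worked example (32-bit, illustrative): for VALUE == 0x12345678 the
   high part 0x12340000 is a LUI_OPERAND, so mips_build_lower produces
   two operations:

       codes[0] = { UNKNOWN, 0x12340000 }   ->  lui  r,0x1234
       codes[1] = { IOR,     0x5678     }   ->  ori  r,r,0x5678  */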
2223
2224
2225 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2226
2227 void
2228 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2229 {
2230 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2231 enum machine_mode mode;
2232 unsigned int i, cost;
2233 rtx x;
2234
2235 mode = GET_MODE (dest);
2236 cost = mips_build_integer (codes, value);
2237
2238 /* Apply each binary operation to X. Invariant: X is a legitimate
2239 source operand for a SET pattern. */
2240 x = GEN_INT (codes[0].value);
2241 for (i = 1; i < cost; i++)
2242 {
2243 if (no_new_pseudos)
2244 {
2245 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2246 x = temp;
2247 }
2248 else
2249 x = force_reg (mode, x);
2250 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2251 }
2252
2253 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2254 }
2255
2256
2257 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2258 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2259 move_operand. */
2260
2261 static void
2262 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2263 {
2264 rtx base;
2265 HOST_WIDE_INT offset;
2266
2267 /* Split moves of big integers into smaller pieces. */
2268 if (splittable_const_int_operand (src, mode))
2269 {
2270 mips_move_integer (dest, dest, INTVAL (src));
2271 return;
2272 }
2273
2274 /* Split moves of symbolic constants into high/low pairs. */
2275 if (splittable_symbolic_operand (src, mode))
2276 {
2277 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2278 return;
2279 }
2280
2281 if (mips_tls_operand_p (src))
2282 {
2283 emit_move_insn (dest, mips_legitimize_tls_address (src));
2284 return;
2285 }
2286
2287 /* If we have (const (plus symbol offset)), load the symbol first
2288 and then add in the offset. This is usually better than forcing
2289 the constant into memory, at least in non-mips16 code. */
2290 mips_split_const (src, &base, &offset);
2291 if (!TARGET_MIPS16
2292 && offset != 0
2293 && (!no_new_pseudos || SMALL_OPERAND (offset)))
2294 {
2295 base = mips_force_temporary (dest, base);
2296 emit_move_insn (dest, mips_add_offset (0, base, offset));
2297 return;
2298 }
2299
2300 src = force_const_mem (mode, src);
2301
2302 /* When using explicit relocs, constant pool references are sometimes
2303 not legitimate addresses. */
2304 if (!memory_operand (src, VOIDmode))
2305 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2306 emit_move_insn (dest, src);
2307 }
2308
2309
2310 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2311 sequence that is valid. */
2312
2313 bool
2314 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2315 {
2316 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2317 {
2318 emit_move_insn (dest, force_reg (mode, src));
2319 return true;
2320 }
2321
2322 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2323 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2324 && REG_P (src) && MD_REG_P (REGNO (src))
2325 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2326 {
2327 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2328 if (GET_MODE_SIZE (mode) <= 4)
2329 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2330 gen_rtx_REG (SImode, REGNO (src)),
2331 gen_rtx_REG (SImode, other_regno)));
2332 else
2333 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2334 gen_rtx_REG (DImode, REGNO (src)),
2335 gen_rtx_REG (DImode, other_regno)));
2336 return true;
2337 }
2338
2339 /* We need to deal with constants that would be legitimate
2340 immediate_operands but not legitimate move_operands. */
2341 if (CONSTANT_P (src) && !move_operand (src, mode))
2342 {
2343 mips_legitimize_const_move (mode, dest, src);
2344 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2345 return true;
2346 }
2347 return false;
2348 }
2349 \f
2350 /* We need a lot of little routines to check constant values on the
2351 mips16. These are used to figure out how long the instruction will
2352 be. It would be much better to do this using constraints, but
2353 there aren't nearly enough letters available. */
2354
2355 static int
2356 m16_check_op (rtx op, int low, int high, int mask)
2357 {
2358 return (GET_CODE (op) == CONST_INT
2359 && INTVAL (op) >= low
2360 && INTVAL (op) <= high
2361 && (INTVAL (op) & mask) == 0);
2362 }
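/* For instance, m16_simm4_1 below accepts [-8, 7], while m16_uimm8_4
   accepts multiples of 4 in [0, 0x3fc] -- roughly the offsets reachable
   by an unextended $sp-relative lw/sw.  */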
2363
2364 int
2365 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2366 {
2367 return m16_check_op (op, 0x1, 0x8, 0);
2368 }
2369
2370 int
2371 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2372 {
2373 return m16_check_op (op, - 0x8, 0x7, 0);
2374 }
2375
2376 int
2377 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2378 {
2379 return m16_check_op (op, - 0x7, 0x8, 0);
2380 }
2381
2382 int
2383 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2384 {
2385 return m16_check_op (op, - 0x10, 0xf, 0);
2386 }
2387
2388 int
2389 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2390 {
2391 return m16_check_op (op, - 0xf, 0x10, 0);
2392 }
2393
2394 int
2395 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2396 {
2397 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2398 }
2399
2400 int
2401 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2402 {
2403 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2404 }
2405
2406 int
2407 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2408 {
2409 return m16_check_op (op, - 0x80, 0x7f, 0);
2410 }
2411
2412 int
2413 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2414 {
2415 return m16_check_op (op, - 0x7f, 0x80, 0);
2416 }
2417
2418 int
2419 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2420 {
2421 return m16_check_op (op, 0x0, 0xff, 0);
2422 }
2423
2424 int
2425 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2426 {
2427 return m16_check_op (op, - 0xff, 0x0, 0);
2428 }
2429
2430 int
2431 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2432 {
2433 return m16_check_op (op, - 0x1, 0xfe, 0);
2434 }
2435
2436 int
2437 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2438 {
2439 return m16_check_op (op, 0x0, 0xff << 2, 3);
2440 }
2441
2442 int
2443 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2444 {
2445 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2446 }
2447
2448 int
2449 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2450 {
2451 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2452 }
2453
2454 int
2455 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2456 {
2457 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2458 }
2459 \f
2460 static bool
2461 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2462 {
2463 enum machine_mode mode = GET_MODE (x);
2464 bool float_mode_p = FLOAT_MODE_P (mode);
2465
2466 switch (code)
2467 {
2468 case CONST_INT:
2469 if (TARGET_MIPS16)
2470 {
2471 /* A number between 1 and 8 inclusive is efficient for a shift.
2472 Otherwise, we will need an extended instruction. */
2473 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2474 || (outer_code) == LSHIFTRT)
2475 {
2476 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2477 *total = 0;
2478 else
2479 *total = COSTS_N_INSNS (1);
2480 return true;
2481 }
2482
2483 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2484 if ((outer_code) == XOR
2485 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2486 {
2487 *total = 0;
2488 return true;
2489 }
2490
2491 /* We may be able to use slt or sltu for a comparison with a
2492 signed 16 bit value. (The boundary conditions aren't quite
2493 right, but this is just a heuristic anyhow.) */
2494 if (((outer_code) == LT || (outer_code) == LE
2495 || (outer_code) == GE || (outer_code) == GT
2496 || (outer_code) == LTU || (outer_code) == LEU
2497 || (outer_code) == GEU || (outer_code) == GTU)
2498 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2499 {
2500 *total = 0;
2501 return true;
2502 }
2503
2504 /* Equality comparisons with 0 are cheap. */
2505 if (((outer_code) == EQ || (outer_code) == NE)
2506 && INTVAL (x) == 0)
2507 {
2508 *total = 0;
2509 return true;
2510 }
2511
2512 /* Constants in the range 0...255 can be loaded with an unextended
2513 instruction. They are therefore as cheap as a register move.
2514
2515 Given the choice between "li R1,0...255" and "move R1,R2"
2516 (where R2 is a known constant), it is usually better to use "li",
2517 since we do not want to unnecessarily extend the lifetime
2518 of R2. */
2519 if (outer_code == SET
2520 && INTVAL (x) >= 0
2521 && INTVAL (x) < 256)
2522 {
2523 *total = 0;
2524 return true;
2525 }
2526 }
2527 else
2528 {
2529 /* These can be used anywhere. */
2530 *total = 0;
2531 return true;
2532 }
2533
2534 /* Otherwise fall through to the handling below because
2535 we'll need to construct the constant. */
2536
2537 case CONST:
2538 case SYMBOL_REF:
2539 case LABEL_REF:
2540 case CONST_DOUBLE:
2541 if (LEGITIMATE_CONSTANT_P (x))
2542 {
2543 *total = COSTS_N_INSNS (1);
2544 return true;
2545 }
2546 else
2547 {
2548 /* The value will need to be fetched from the constant pool. */
2549 *total = CONSTANT_POOL_COST;
2550 return true;
2551 }
2552
2553 case MEM:
2554 {
2555 /* If the address is legitimate, return the number of
2556 instructions it needs, otherwise use the default handling. */
2557 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2558 if (n > 0)
2559 {
2560 *total = COSTS_N_INSNS (n + 1);
2561 return true;
2562 }
2563 return false;
2564 }
2565
2566 case FFS:
2567 *total = COSTS_N_INSNS (6);
2568 return true;
2569
2570 case NOT:
2571 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2572 return true;
2573
2574 case AND:
2575 case IOR:
2576 case XOR:
2577 if (mode == DImode && !TARGET_64BIT)
2578 {
2579 *total = COSTS_N_INSNS (2);
2580 return true;
2581 }
2582 return false;
2583
2584 case ASHIFT:
2585 case ASHIFTRT:
2586 case LSHIFTRT:
2587 if (mode == DImode && !TARGET_64BIT)
2588 {
2589 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2590 ? 4 : 12);
2591 return true;
2592 }
2593 return false;
2594
2595 case ABS:
2596 if (float_mode_p)
2597 *total = COSTS_N_INSNS (1);
2598 else
2599 *total = COSTS_N_INSNS (4);
2600 return true;
2601
2602 case LO_SUM:
2603 *total = COSTS_N_INSNS (1);
2604 return true;
2605
2606 case PLUS:
2607 case MINUS:
2608 if (float_mode_p)
2609 {
2610 *total = mips_cost->fp_add;
2611 return true;
2612 }
2613
2614 else if (mode == DImode && !TARGET_64BIT)
2615 {
2616 *total = COSTS_N_INSNS (4);
2617 return true;
2618 }
2619 return false;
2620
2621 case NEG:
2622 if (mode == DImode && !TARGET_64BIT)
2623 {
2624 *total = COSTS_N_INSNS (4);
2625 return true;
2626 }
2627 return false;
2628
2629 case MULT:
2630 if (mode == SFmode)
2631 *total = mips_cost->fp_mult_sf;
2632
2633 else if (mode == DFmode)
2634 *total = mips_cost->fp_mult_df;
2635
2636 else if (mode == SImode)
2637 *total = mips_cost->int_mult_si;
2638
2639 else
2640 *total = mips_cost->int_mult_di;
2641
2642 return true;
2643
2644 case DIV:
2645 case MOD:
2646 if (float_mode_p)
2647 {
2648 if (mode == SFmode)
2649 *total = mips_cost->fp_div_sf;
2650 else
2651 *total = mips_cost->fp_div_df;
2652
2653 return true;
2654 }
2655 /* Fall through. */
2656
2657 case UDIV:
2658 case UMOD:
2659 if (mode == DImode)
2660 *total = mips_cost->int_div_di;
2661 else
2662 *total = mips_cost->int_div_si;
2663
2664 return true;
2665
2666 case SIGN_EXTEND:
2667 /* A sign extend from SImode to DImode in 64 bit mode is often
2668 zero instructions, because the result can often be used
2669 directly by another instruction; we'll call it one. */
2670 if (TARGET_64BIT && mode == DImode
2671 && GET_MODE (XEXP (x, 0)) == SImode)
2672 *total = COSTS_N_INSNS (1);
2673 else
2674 *total = COSTS_N_INSNS (2);
2675 return true;
2676
2677 case ZERO_EXTEND:
2678 if (TARGET_64BIT && mode == DImode
2679 && GET_MODE (XEXP (x, 0)) == SImode)
2680 *total = COSTS_N_INSNS (2);
2681 else
2682 *total = COSTS_N_INSNS (1);
2683 return true;
2684
2685 case FLOAT:
2686 case UNSIGNED_FLOAT:
2687 case FIX:
2688 case FLOAT_EXTEND:
2689 case FLOAT_TRUNCATE:
2690 case SQRT:
2691 *total = mips_cost->fp_add;
2692 return true;
2693
2694 default:
2695 return false;
2696 }
2697 }
2698
2699 /* Provide the costs of an addressing mode that contains ADDR.
2700 If ADDR is not a valid address, its cost is irrelevant. */
2701
2702 static int
2703 mips_address_cost (rtx addr)
2704 {
2705 return mips_address_insns (addr, SImode);
2706 }
2707 \f
2708 /* Return one word of double-word value OP, taking into account the fixed
2709 endianness of certain registers. HIGH_P is true to select the high part,
2710 false to select the low part. */
2711
2712 rtx
2713 mips_subword (rtx op, int high_p)
2714 {
2715 unsigned int byte;
2716 enum machine_mode mode;
2717
2718 mode = GET_MODE (op);
2719 if (mode == VOIDmode)
2720 mode = DImode;
2721
2722 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2723 byte = UNITS_PER_WORD;
2724 else
2725 byte = 0;
2726
2727 if (REG_P (op))
2728 {
2729 if (FP_REG_P (REGNO (op)))
2730 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2731 if (ACC_HI_REG_P (REGNO (op)))
2732 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2733 }
2734
2735 if (MEM_P (op))
2736 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2737
2738 return simplify_gen_subreg (word_mode, op, mode, byte);
2739 }
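/* Example: on a big-endian 32-bit target, the high word of a DImode MEM
   is at byte offset 0 and the low word at byte offset 4; little-endian
   targets are the other way around.  FPR and HI/LO-style accumulator
   pairs instead use the fixed register ordering handled explicitly
   above.  */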
2740
2741
2742 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2743
2744 bool
2745 mips_split_64bit_move_p (rtx dest, rtx src)
2746 {
2747 if (TARGET_64BIT)
2748 return false;
2749
2750 /* FP->FP moves can be done in a single instruction. */
2751 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2752 return false;
2753
2754 /* Check for floating-point loads and stores. They can be done using
2755 ldc1 and sdc1 on MIPS II and above. */
2756 if (mips_isa > 1)
2757 {
2758 if (FP_REG_RTX_P (dest) && MEM_P (src))
2759 return false;
2760 if (FP_REG_RTX_P (src) && MEM_P (dest))
2761 return false;
2762 }
2763 return true;
2764 }
2765
2766
2767 /* Split a 64-bit move from SRC to DEST assuming that
2768 mips_split_64bit_move_p holds.
2769
2770 Moves into and out of FPRs cause some difficulty here. Such moves
2771 will always be DFmode, since paired FPRs are not allowed to store
2772 DImode values. The most natural representation would be two separate
2773 32-bit moves, such as:
2774
2775 (set (reg:SI $f0) (mem:SI ...))
2776 (set (reg:SI $f1) (mem:SI ...))
2777
2778 However, the second insn is invalid because odd-numbered FPRs are
2779 not allowed to store independent values. Use the patterns load_df_low,
2780 load_df_high and store_df_high instead. */
2781
2782 void
2783 mips_split_64bit_move (rtx dest, rtx src)
2784 {
2785 if (FP_REG_RTX_P (dest))
2786 {
2787 /* Loading an FPR from memory or from GPRs. */
2788 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2789 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2790 copy_rtx (dest)));
2791 }
2792 else if (FP_REG_RTX_P (src))
2793 {
2794 /* Storing an FPR into memory or GPRs. */
2795 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2796 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2797 }
2798 else
2799 {
2800 /* The operation can be split into two normal moves. Decide in
2801 which order to do them. */
2802 rtx low_dest;
2803
2804 low_dest = mips_subword (dest, 0);
2805 if (REG_P (low_dest)
2806 && reg_overlap_mentioned_p (low_dest, src))
2807 {
2808 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2809 emit_move_insn (low_dest, mips_subword (src, 0));
2810 }
2811 else
2812 {
2813 emit_move_insn (low_dest, mips_subword (src, 0));
2814 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2815 }
2816 }
2817 }
2818 \f
2819 /* Return the appropriate instructions to move SRC into DEST. Assume
2820 that SRC is operand 1 and DEST is operand 0. */
2821
2822 const char *
2823 mips_output_move (rtx dest, rtx src)
2824 {
2825 enum rtx_code dest_code, src_code;
2826 bool dbl_p;
2827
2828 dest_code = GET_CODE (dest);
2829 src_code = GET_CODE (src);
2830 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2831
2832 if (dbl_p && mips_split_64bit_move_p (dest, src))
2833 return "#";
2834
2835 if ((src_code == REG && GP_REG_P (REGNO (src)))
2836 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2837 {
2838 if (dest_code == REG)
2839 {
2840 if (GP_REG_P (REGNO (dest)))
2841 return "move\t%0,%z1";
2842
2843 if (MD_REG_P (REGNO (dest)))
2844 return "mt%0\t%z1";
2845
2846 if (DSP_ACC_REG_P (REGNO (dest)))
2847 {
2848 static char retval[] = "mt__\t%z1,%q0";
2849 retval[2] = reg_names[REGNO (dest)][4];
2850 retval[3] = reg_names[REGNO (dest)][5];
2851 return retval;
2852 }
2853
2854 if (FP_REG_P (REGNO (dest)))
2855 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2856
2857 if (ALL_COP_REG_P (REGNO (dest)))
2858 {
2859 static char retval[] = "dmtc_\t%z1,%0";
2860
2861 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2862 return (dbl_p ? retval : retval + 1);
2863 }
2864 }
2865 if (dest_code == MEM)
2866 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2867 }
2868 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2869 {
2870 if (src_code == REG)
2871 {
2872 if (DSP_ACC_REG_P (REGNO (src)))
2873 {
2874 static char retval[] = "mf__\t%0,%q1";
2875 retval[2] = reg_names[REGNO (src)][4];
2876 retval[3] = reg_names[REGNO (src)][5];
2877 return retval;
2878 }
2879
2880 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2881 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2882
2883 if (FP_REG_P (REGNO (src)))
2884 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2885
2886 if (ALL_COP_REG_P (REGNO (src)))
2887 {
2888 static char retval[] = "dmfc_\t%0,%1";
2889
2890 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2891 return (dbl_p ? retval : retval + 1);
2892 }
2893 }
2894
2895 if (src_code == MEM)
2896 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2897
2898 if (src_code == CONST_INT)
2899 {
2900 /* Don't use the X format, because that will give out of
2901 range numbers for 64 bit hosts and 32 bit targets. */
2902 if (!TARGET_MIPS16)
2903 return "li\t%0,%1\t\t\t# %X1";
2904
2905 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2906 return "li\t%0,%1";
2907
2908 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2909 return "#";
2910 }
2911
2912 if (src_code == HIGH)
2913 return "lui\t%0,%h1";
2914
2915 if (CONST_GP_P (src))
2916 return "move\t%0,%1";
2917
2918 if (symbolic_operand (src, VOIDmode))
2919 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2920 }
2921 if (src_code == REG && FP_REG_P (REGNO (src)))
2922 {
2923 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2924 {
2925 if (GET_MODE (dest) == V2SFmode)
2926 return "mov.ps\t%0,%1";
2927 else
2928 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2929 }
2930
2931 if (dest_code == MEM)
2932 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2933 }
2934 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2935 {
2936 if (src_code == MEM)
2937 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2938 }
2939 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2940 {
2941 static char retval[] = "l_c_\t%0,%1";
2942
2943 retval[1] = (dbl_p ? 'd' : 'w');
2944 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2945 return retval;
2946 }
2947 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2948 {
2949 static char retval[] = "s_c_\t%1,%0";
2950
2951 retval[1] = (dbl_p ? 'd' : 'w');
2952 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2953 return retval;
2954 }
2955 gcc_unreachable ();
2956 }
2957 \f
2958 /* Restore $gp from its save slot. Valid only when using o32 or
2959 o64 abicalls. */
2960
2961 void
2962 mips_restore_gp (void)
2963 {
2964 rtx address, slot;
2965
2966 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2967
2968 address = mips_add_offset (pic_offset_table_rtx,
2969 frame_pointer_needed
2970 ? hard_frame_pointer_rtx
2971 : stack_pointer_rtx,
2972 current_function_outgoing_args_size);
2973 slot = gen_rtx_MEM (Pmode, address);
2974
2975 emit_move_insn (pic_offset_table_rtx, slot);
2976 if (!TARGET_EXPLICIT_RELOCS)
2977 emit_insn (gen_blockage ());
2978 }
2979 \f
2980 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2981
2982 static void
2983 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2984 {
2985 emit_insn (gen_rtx_SET (VOIDmode, target,
2986 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2987 }
2988
2989 /* Return true if CMP1 is a suitable second operand for relational
2990 operator CODE. See also the *sCC patterns in mips.md. */
2991
2992 static bool
2993 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2994 {
2995 switch (code)
2996 {
2997 case GT:
2998 case GTU:
2999 return reg_or_0_operand (cmp1, VOIDmode);
3000
3001 case GE:
3002 case GEU:
3003 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3004
3005 case LT:
3006 case LTU:
3007 return arith_operand (cmp1, VOIDmode);
3008
3009 case LE:
3010 return sle_operand (cmp1, VOIDmode);
3011
3012 case LEU:
3013 return sleu_operand (cmp1, VOIDmode);
3014
3015 default:
3016 gcc_unreachable ();
3017 }
3018 }
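/* The GE/GEU case above only accepts the constant 1, presumably because
   "x >= 1" is the same as "x > 0" for both signed and unsigned values,
   and so maps onto the GT forms of the *sCC patterns without an extra
   instruction.  */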
3019
3020 /* Canonicalize LE or LEU comparisons into LT comparisons when
3021 possible to avoid extra instructions or inverting the
3022 comparison. */
3023
3024 static bool
3025 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3026 enum machine_mode mode)
3027 {
3028 HOST_WIDE_INT original, plus_one;
3029
3030 if (GET_CODE (*cmp1) != CONST_INT)
3031 return false;
3032
3033 original = INTVAL (*cmp1);
3034 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3035
3036 switch (*code)
3037 {
3038 case LE:
3039 if (original < plus_one)
3040 {
3041 *code = LT;
3042 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3043 return true;
3044 }
3045 break;
3046
3047 case LEU:
3048 if (plus_one != 0)
3049 {
3050 *code = LTU;
3051 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3052 return true;
3053 }
3054 break;
3055
3056 default:
3057 return false;
3058 }
3059
3060 return false;
3061
3062 }
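/* Examples: (le x 100) becomes (lt x 101), with 101 forced into a
   register; (leu x -1) is left alone because PLUS_ONE wraps to zero,
   and (le x INT_MAX) is left alone because PLUS_ONE would wrap to a
   smaller value.  */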
3063
3064 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3065 result in TARGET. CMP0 and TARGET are register_operands that have
3066 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3067 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3068
3069 static void
3070 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3071 rtx target, rtx cmp0, rtx cmp1)
3072 {
3073 /* First see if there is a MIPS instruction that can do this operation
3074 with CMP1 in its current form. If not, try to canonicalize the
3075 comparison to LT. If that fails, try doing the same for the
3076 inverse operation. If that also fails, force CMP1 into a register
3077 and try again. */
3078 if (mips_relational_operand_ok_p (code, cmp1))
3079 mips_emit_binary (code, target, cmp0, cmp1);
3080 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3081 mips_emit_binary (code, target, cmp0, cmp1);
3082 else
3083 {
3084 enum rtx_code inv_code = reverse_condition (code);
3085 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3086 {
3087 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3088 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3089 }
3090 else if (invert_ptr == 0)
3091 {
3092 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3093 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3094 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3095 }
3096 else
3097 {
3098 *invert_ptr = !*invert_ptr;
3099 mips_emit_binary (inv_code, target, cmp0, cmp1);
3100 }
3101 }
3102 }
3103
3104 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3105 The register will have the same mode as CMP0. */
3106
3107 static rtx
3108 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3109 {
3110 if (cmp1 == const0_rtx)
3111 return cmp0;
3112
3113 if (uns_arith_operand (cmp1, VOIDmode))
3114 return expand_binop (GET_MODE (cmp0), xor_optab,
3115 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3116
3117 return expand_binop (GET_MODE (cmp0), sub_optab,
3118 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3119 }
3120
3121 /* Convert a comparison into something that can be used in a branch or
3122 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3123 being compared and *CODE is the code used to compare them.
3124
3125 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3126 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3127 otherwise any standard branch condition can be used. The standard branch
3128 conditions are:
3129
3130 - EQ/NE between two registers.
3131 - any comparison between a register and zero. */
3132
3133 static void
3134 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3135 {
3136 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3137 {
3138 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3139 {
3140 *op0 = cmp_operands[0];
3141 *op1 = cmp_operands[1];
3142 }
3143 else if (*code == EQ || *code == NE)
3144 {
3145 if (need_eq_ne_p)
3146 {
3147 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3148 *op1 = const0_rtx;
3149 }
3150 else
3151 {
3152 *op0 = cmp_operands[0];
3153 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3154 }
3155 }
3156 else
3157 {
3158 /* The comparison needs a separate scc instruction. Store the
3159 result of the scc in *OP0 and compare it against zero. */
3160 bool invert = false;
3161 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3162 *op1 = const0_rtx;
3163 mips_emit_int_relational (*code, &invert, *op0,
3164 cmp_operands[0], cmp_operands[1]);
3165 *code = (invert ? EQ : NE);
3166 }
3167 }
3168 else
3169 {
3170 enum rtx_code cmp_code;
3171
3172 /* Floating-point tests use a separate c.cond.fmt comparison to
3173 set a condition code register. The branch or conditional move
3174 will then compare that register against zero.
3175
3176 Set CMP_CODE to the code of the comparison instruction and
3177 *CODE to the code that the branch or move should use. */
3178 switch (*code)
3179 {
3180 case NE:
3181 case LTGT:
3182 case ORDERED:
3183 cmp_code = reverse_condition_maybe_unordered (*code);
3184 *code = EQ;
3185 break;
3186
3187 default:
3188 cmp_code = *code;
3189 *code = NE;
3190 break;
3191 }
3192 *op0 = (ISA_HAS_8CC
3193 ? gen_reg_rtx (CCmode)
3194 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3195 *op1 = const0_rtx;
3196 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3197 }
3198 }
3199 \f
3200 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3201 Store the result in TARGET and return true if successful.
3202
3203 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3204
3205 bool
3206 mips_emit_scc (enum rtx_code code, rtx target)
3207 {
3208 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3209 return false;
3210
3211 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3212 if (code == EQ || code == NE)
3213 {
3214 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3215 mips_emit_binary (code, target, zie, const0_rtx);
3216 }
3217 else
3218 mips_emit_int_relational (code, 0, target,
3219 cmp_operands[0], cmp_operands[1]);
3220 return true;
3221 }
3222
3223 /* Emit the common code for doing conditional branches.
3224 operands[0] is the label to jump to.
3225 The comparison operands are saved away by cmp{si,di,sf,df}. */
3226
3227 void
3228 gen_conditional_branch (rtx *operands, enum rtx_code code)
3229 {
3230 rtx op0, op1, condition;
3231
3232 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3233 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3234 emit_jump_insn (gen_condjump (condition, operands[0]));
3235 }
3236
3237 /* Emit the common code for conditional moves. OPERANDS is the array
3238 of operands passed to the conditional move define_expand. */
3239
3240 void
3241 gen_conditional_move (rtx *operands)
3242 {
3243 enum rtx_code code;
3244 rtx op0, op1;
3245
3246 code = GET_CODE (operands[1]);
3247 mips_emit_compare (&code, &op0, &op1, true);
3248 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3249 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3250 gen_rtx_fmt_ee (code,
3251 GET_MODE (op0),
3252 op0, op1),
3253 operands[2], operands[3])));
3254 }
3255
3256 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3257 the conditional_trap expander. */
3258
3259 void
3260 mips_gen_conditional_trap (rtx *operands)
3261 {
3262 rtx op0, op1;
3263 enum rtx_code cmp_code = GET_CODE (operands[0]);
3264 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3265
3266 /* MIPS conditional trap machine instructions don't have GT or LE
3267 flavors, so we must swap the operands and convert to LT and
3268 GE, respectively. */
3269 switch (cmp_code)
3270 {
3271 case GT: cmp_code = LT; break;
3272 case LE: cmp_code = GE; break;
3273 case GTU: cmp_code = LTU; break;
3274 case LEU: cmp_code = GEU; break;
3275 default: break;
3276 }
3277 if (cmp_code == GET_CODE (operands[0]))
3278 {
3279 op0 = cmp_operands[0];
3280 op1 = cmp_operands[1];
3281 }
3282 else
3283 {
3284 op0 = cmp_operands[1];
3285 op1 = cmp_operands[0];
3286 }
3287 op0 = force_reg (mode, op0);
3288 if (!arith_operand (op1, mode))
3289 op1 = force_reg (mode, op1);
3290
3291 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3292 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3293 operands[1]));
3294 }
3295 \f
3296 /* Load function address ADDR into register DEST. SIBCALL_P is true
3297 if the address is needed for a sibling call. */
3298
3299 static void
3300 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3301 {
3302 /* If we're generating PIC, and this call is to a global function,
3303 try to allow its address to be resolved lazily. This isn't
3304 possible for NewABI sibcalls since the value of $gp on entry
3305 to the stub would be our caller's gp, not ours. */
3306 if (TARGET_EXPLICIT_RELOCS
3307 && !(sibcall_p && TARGET_NEWABI)
3308 && global_got_operand (addr, VOIDmode))
3309 {
3310 rtx high, lo_sum_symbol;
3311
3312 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3313 addr, SYMBOL_GOTOFF_CALL);
3314 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3315 if (Pmode == SImode)
3316 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3317 else
3318 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3319 }
3320 else
3321 emit_move_insn (dest, addr);
3322 }
3323
3324
3325 /* Expand a call or call_value instruction. RESULT is where the
3326 result will go (null for calls), ADDR is the address of the
3327 function, ARGS_SIZE is the size of the arguments and AUX is
3328 the value passed to us by mips_function_arg. SIBCALL_P is true
3329 if we are expanding a sibling call, false if we're expanding
3330 a normal call. */
3331
3332 void
3333 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3334 {
3335 rtx orig_addr, pattern, insn;
3336
3337 orig_addr = addr;
3338 if (!call_insn_operand (addr, VOIDmode))
3339 {
3340 addr = gen_reg_rtx (Pmode);
3341 mips_load_call_address (addr, orig_addr, sibcall_p);
3342 }
3343
3344 if (TARGET_MIPS16
3345 && mips16_hard_float
3346 && build_mips16_call_stub (result, addr, args_size,
3347 aux == 0 ? 0 : (int) GET_MODE (aux)))
3348 return;
3349
3350 if (result == 0)
3351 pattern = (sibcall_p
3352 ? gen_sibcall_internal (addr, args_size)
3353 : gen_call_internal (addr, args_size));
3354 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3355 {
3356 rtx reg1, reg2;
3357
3358 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3359 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3360 pattern =
3361 (sibcall_p
3362 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3363 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3364 }
3365 else
3366 pattern = (sibcall_p
3367 ? gen_sibcall_value_internal (result, addr, args_size)
3368 : gen_call_value_internal (result, addr, args_size));
3369
3370 insn = emit_call_insn (pattern);
3371
3372 /* Lazy-binding stubs require $gp to be valid on entry. */
3373 if (global_got_operand (orig_addr, VOIDmode))
3374 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3375 }
3376
3377
3378 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3379
3380 static bool
3381 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3382 tree exp ATTRIBUTE_UNUSED)
3383 {
3384 return TARGET_SIBCALLS;
3385 }
3386 \f
3387 /* Emit code to move general operand SRC into condition-code
3388 register DEST. SCRATCH is a scratch TFmode float register.
3389 The sequence is:
3390
3391 FP1 = SRC
3392 FP2 = 0.0f
3393 DEST = FP2 < FP1
3394
3395 where FP1 and FP2 are single-precision float registers
3396 taken from SCRATCH. */
3397
3398 void
3399 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3400 {
3401 rtx fp1, fp2;
3402
3403 /* Change the source to SFmode. */
3404 if (MEM_P (src))
3405 src = adjust_address (src, SFmode, 0);
3406 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3407 src = gen_rtx_REG (SFmode, true_regnum (src));
3408
3409 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3410 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3411
3412 emit_move_insn (copy_rtx (fp1), src);
3413 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3414 emit_insn (gen_slt_sf (dest, fp2, fp1));
3415 }
3416 \f
3417 /* Emit code to change the current function's return address to
3418 ADDRESS. SCRATCH is available as a scratch register, if needed.
3419 ADDRESS and SCRATCH are both word-mode GPRs. */
3420
3421 void
3422 mips_set_return_address (rtx address, rtx scratch)
3423 {
3424 rtx slot_address;
3425
3426 compute_frame_size (get_frame_size ());
3427 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3428 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3429 cfun->machine->frame.gp_sp_offset);
3430
3431 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3432 }
3433 \f
3434 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3435 Assume that the areas do not overlap. */
3436
3437 static void
3438 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3439 {
3440 HOST_WIDE_INT offset, delta;
3441 unsigned HOST_WIDE_INT bits;
3442 int i;
3443 enum machine_mode mode;
3444 rtx *regs;
3445
3446 /* Work out how many bits to move at a time. If both operands have
3447 half-word alignment, it is usually better to move in half words.
3448 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3449 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3450 Otherwise move word-sized chunks. */
3451 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3452 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3453 bits = BITS_PER_WORD / 2;
3454 else
3455 bits = BITS_PER_WORD;
3456
3457 mode = mode_for_size (bits, MODE_INT, 0);
3458 delta = bits / BITS_PER_UNIT;
3459
3460 /* Allocate a buffer for the temporary registers. */
3461 regs = alloca (sizeof (rtx) * length / delta);
3462
3463 /* Load as many BITS-sized chunks as possible. Use a normal load if
3464 the source has enough alignment, otherwise use left/right pairs. */
3465 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3466 {
3467 regs[i] = gen_reg_rtx (mode);
3468 if (MEM_ALIGN (src) >= bits)
3469 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3470 else
3471 {
3472 rtx part = adjust_address (src, BLKmode, offset);
3473 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3474 gcc_unreachable ();
3475 }
3476 }
3477
3478 /* Copy the chunks to the destination. */
3479 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3480 if (MEM_ALIGN (dest) >= bits)
3481 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3482 else
3483 {
3484 rtx part = adjust_address (dest, BLKmode, offset);
3485 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3486 gcc_unreachable ();
3487 }
3488
3489 /* Mop up any left-over bytes. */
3490 if (offset < length)
3491 {
3492 src = adjust_address (src, BLKmode, offset);
3493 dest = adjust_address (dest, BLKmode, offset);
3494 move_by_pieces (dest, src, length - offset,
3495 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3496 }
3497 }
3498 \f
3499 #define MAX_MOVE_REGS 4
3500 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3501
3502
3503 /* Helper function for doing a loop-based block operation on memory
3504 reference MEM. Each iteration of the loop will operate on LENGTH
3505 bytes of MEM.
3506
3507 Create a new base register for use within the loop and point it to
3508 the start of MEM. Create a new memory reference that uses this
3509 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3510
3511 static void
3512 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3513 rtx *loop_reg, rtx *loop_mem)
3514 {
3515 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3516
3517 /* Although the new mem does not refer to a known location,
3518 it does keep up to LENGTH bytes of alignment. */
3519 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3520 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3521 }
3522
3523
3524 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3525 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3526 memory regions do not overlap. */
3527
3528 static void
3529 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3530 {
3531 rtx label, src_reg, dest_reg, final_src;
3532 HOST_WIDE_INT leftover;
3533
3534 leftover = length % MAX_MOVE_BYTES;
3535 length -= leftover;
3536
3537 /* Create registers and memory references for use within the loop. */
3538 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3539 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3540
3541 /* Calculate the value that SRC_REG should have after the last iteration
3542 of the loop. */
3543 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3544 0, 0, OPTAB_WIDEN);
3545
3546 /* Emit the start of the loop. */
3547 label = gen_label_rtx ();
3548 emit_label (label);
3549
3550 /* Emit the loop body. */
3551 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3552
3553 /* Move on to the next block. */
3554 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3555 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3556
3557 /* Emit the loop condition. */
3558 if (Pmode == DImode)
3559 emit_insn (gen_cmpdi (src_reg, final_src));
3560 else
3561 emit_insn (gen_cmpsi (src_reg, final_src));
3562 emit_jump_insn (gen_bne (label));
3563
3564 /* Mop up any left-over bytes. */
3565 if (leftover)
3566 mips_block_move_straight (dest, src, leftover);
3567 }
3568 \f
3569 /* Expand a movmemsi instruction. */
3570
3571 bool
3572 mips_expand_block_move (rtx dest, rtx src, rtx length)
3573 {
3574 if (GET_CODE (length) == CONST_INT)
3575 {
3576 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3577 {
3578 mips_block_move_straight (dest, src, INTVAL (length));
3579 return true;
3580 }
3581 else if (optimize)
3582 {
3583 mips_block_move_loop (dest, src, INTVAL (length));
3584 return true;
3585 }
3586 }
3587 return false;
3588 }
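/* Illustrative note, added for exposition only: with 32-bit registers,
   MAX_MOVE_BYTES is 4 * 4 = 16 bytes, so a constant 100-byte copy is too
   long for the straight-line expansion (limit 2 * MAX_MOVE_BYTES = 32)
   and is instead handled by mips_block_move_loop as six 16-byte loop
   iterations (96 bytes) followed by a straight-line copy of the 4
   left-over bytes.  */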
3589 \f
3590 /* Argument support functions. */
3591
3592 /* Initialize CUMULATIVE_ARGS for a function. */
3593
3594 void
3595 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3596 rtx libname ATTRIBUTE_UNUSED)
3597 {
3598 static CUMULATIVE_ARGS zero_cum;
3599 tree param, next_param;
3600
3601 *cum = zero_cum;
3602 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3603
3604 /* Determine if this function has variable arguments. This is
3605 indicated by the last argument being 'void_type_node' if there
3606 are no variable arguments. The standard MIPS calling sequence
3607 passes all arguments in the general purpose registers in this case. */
3608
3609 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3610 param != 0; param = next_param)
3611 {
3612 next_param = TREE_CHAIN (param);
3613 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3614 cum->gp_reg_found = 1;
3615 }
3616 }
3617
3618
3619 /* Fill INFO with information about a single argument. CUM is the
3620 cumulative state for earlier arguments. MODE is the mode of this
3621 argument and TYPE is its type (if known). NAMED is true if this
3622 is a named (fixed) argument rather than a variable one. */
3623
3624 static void
3625 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3626 tree type, int named, struct mips_arg_info *info)
3627 {
3628 bool doubleword_aligned_p;
3629 unsigned int num_bytes, num_words, max_regs;
3630
3631 /* Work out the size of the argument. */
3632 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3633 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3634
3635 /* Decide whether it should go in a floating-point register, assuming
3636 one is free. Later code checks for availability.
3637
3638 The checks against UNITS_PER_FPVALUE handle the soft-float and
3639 single-float cases. */
3640 switch (mips_abi)
3641 {
3642 case ABI_EABI:
3643 /* The EABI conventions have traditionally been defined in terms
3644 of TYPE_MODE, regardless of the actual type. */
3645 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3646 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3647 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3648 break;
3649
3650 case ABI_32:
3651 case ABI_O64:
3652 /* Only leading floating-point scalars are passed in
3653 floating-point registers. We also handle vector floats the same
3654 way, which is OK because they are not covered by the standard ABI. */
3655 info->fpr_p = (!cum->gp_reg_found
3656 && cum->arg_number < 2
3657 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3658 || VECTOR_FLOAT_TYPE_P (type))
3659 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3660 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3661 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3662 break;
3663
3664 case ABI_N32:
3665 case ABI_64:
3666 /* Scalar and complex floating-point types are passed in
3667 floating-point registers. */
3668 info->fpr_p = (named
3669 && (type == 0 || FLOAT_TYPE_P (type))
3670 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3671 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3672 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3673 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3674
3675 /* ??? According to the ABI documentation, the real and imaginary
3676 parts of complex floats should be passed in individual registers.
3677 The real and imaginary parts of stack arguments are supposed
3678 to be contiguous and there should be an extra word of padding
3679 at the end.
3680
3681 This has two problems. First, it makes it impossible to use a
3682 single "void *" va_list type, since register and stack arguments
3683 are passed differently. (At the time of writing, MIPSpro cannot
3684 handle complex float varargs correctly.) Second, it's unclear
3685 what should happen when there is only one register free.
3686
3687 For now, we assume that named complex floats should go into FPRs
3688 if there are two FPRs free, otherwise they should be passed in the
3689 same way as a struct containing two floats. */
3690 if (info->fpr_p
3691 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3692 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3693 {
3694 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3695 info->fpr_p = false;
3696 else
3697 num_words = 2;
3698 }
3699 break;
3700
3701 default:
3702 gcc_unreachable ();
3703 }
3704
3705 /* See whether the argument has doubleword alignment. */
3706 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3707
3708 /* Set REG_OFFSET to the register count we're interested in.
3709 The EABI allocates the floating-point registers separately,
3710 but the other ABIs allocate them like integer registers. */
3711 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3712 ? cum->num_fprs
3713 : cum->num_gprs);
3714
3715 /* Advance to an even register if the argument is doubleword-aligned. */
3716 if (doubleword_aligned_p)
3717 info->reg_offset += info->reg_offset & 1;
3718
3719 /* Work out the offset of a stack argument. */
3720 info->stack_offset = cum->stack_words;
3721 if (doubleword_aligned_p)
3722 info->stack_offset += info->stack_offset & 1;
3723
3724 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3725
3726 /* Partition the argument between registers and stack. */
3727 info->reg_words = MIN (num_words, max_regs);
3728 info->stack_words = num_words - info->reg_words;
3729 }
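/* Worked example, added for exposition only: under the o32 ABI a call
   such as f (int a, double b) passes A in $4.  Because the double has
   doubleword alignment, the code above rounds REG_OFFSET up to the next
   even register, so B occupies the pair $6/$7 rather than $5/$6.  */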
3730
3731
3732 /* Implement FUNCTION_ARG_ADVANCE. */
3733
3734 void
3735 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3736 tree type, int named)
3737 {
3738 struct mips_arg_info info;
3739
3740 mips_arg_info (cum, mode, type, named, &info);
3741
3742 if (!info.fpr_p)
3743 cum->gp_reg_found = true;
3744
3745 /* See the comment above the cumulative args structure in mips.h
3746 for an explanation of what this code does. It assumes the O32
3747 ABI, which passes at most 2 arguments in float registers. */
3748 if (cum->arg_number < 2 && info.fpr_p)
3749 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
3750
3751 if (mips_abi != ABI_EABI || !info.fpr_p)
3752 cum->num_gprs = info.reg_offset + info.reg_words;
3753 else if (info.reg_words > 0)
3754 cum->num_fprs += FP_INC;
3755
3756 if (info.stack_words > 0)
3757 cum->stack_words = info.stack_offset + info.stack_words;
3758
3759 cum->arg_number++;
3760 }
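/* Illustrative example of the FP_CODE encoding updated above, added for
   exposition only: each of the first two arguments contributes a two-bit
   field (1 for SFmode, 2 otherwise) shifted left by twice its argument
   number, so for g (double x, float y) we get fp_code = 2 + (1 << 2) = 6.  */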
3761
3762 /* Implement FUNCTION_ARG. */
3763
3764 struct rtx_def *
3765 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3766 tree type, int named)
3767 {
3768 struct mips_arg_info info;
3769
3770 /* We will be called with a mode of VOIDmode after the last argument
3771 has been seen. Whatever we return will be passed to the call
3772 insn. If we need a mips16 fp_code, return a REG with the code
3773 stored as the mode. */
3774 if (mode == VOIDmode)
3775 {
3776 if (TARGET_MIPS16 && cum->fp_code != 0)
3777 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3778
3779 else
3780 return 0;
3781 }
3782
3783 mips_arg_info (cum, mode, type, named, &info);
3784
3785 /* Return straight away if the whole argument is passed on the stack. */
3786 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3787 return 0;
3788
3789 if (type != 0
3790 && TREE_CODE (type) == RECORD_TYPE
3791 && TARGET_NEWABI
3792 && TYPE_SIZE_UNIT (type)
3793 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3794 && named)
3795 {
3796 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3797 structure contains a double in its entirety, then that 64 bit
3798 chunk is passed in a floating point register. */
3799 tree field;
3800
3801 /* First check to see if there is any such field. */
3802 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3803 if (TREE_CODE (field) == FIELD_DECL
3804 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3805 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3806 && host_integerp (bit_position (field), 0)
3807 && int_bit_position (field) % BITS_PER_WORD == 0)
3808 break;
3809
3810 if (field != 0)
3811 {
3812 /* Now handle the special case by returning a PARALLEL
3813 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3814 chunks are passed in registers. */
3815 unsigned int i;
3816 HOST_WIDE_INT bitpos;
3817 rtx ret;
3818
3819 /* assign_parms checks the mode of ENTRY_PARM, so we must
3820 use the actual mode here. */
3821 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3822
3823 bitpos = 0;
3824 field = TYPE_FIELDS (type);
3825 for (i = 0; i < info.reg_words; i++)
3826 {
3827 rtx reg;
3828
3829 for (; field; field = TREE_CHAIN (field))
3830 if (TREE_CODE (field) == FIELD_DECL
3831 && int_bit_position (field) >= bitpos)
3832 break;
3833
3834 if (field
3835 && int_bit_position (field) == bitpos
3836 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3837 && !TARGET_SOFT_FLOAT
3838 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3839 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3840 else
3841 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3842
3843 XVECEXP (ret, 0, i)
3844 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3845 GEN_INT (bitpos / BITS_PER_UNIT));
3846
3847 bitpos += BITS_PER_WORD;
3848 }
3849 return ret;
3850 }
3851 }
3852
3853 /* Handle the n32/n64 conventions for passing complex floating-point
3854 arguments in FPR pairs. The real part goes in the lower register
3855 and the imaginary part goes in the upper register. */
3856 if (TARGET_NEWABI
3857 && info.fpr_p
3858 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3859 {
3860 rtx real, imag;
3861 enum machine_mode inner;
3862 int reg;
3863
3864 inner = GET_MODE_INNER (mode);
3865 reg = FP_ARG_FIRST + info.reg_offset;
3866 real = gen_rtx_EXPR_LIST (VOIDmode,
3867 gen_rtx_REG (inner, reg),
3868 const0_rtx);
3869 imag = gen_rtx_EXPR_LIST (VOIDmode,
3870 gen_rtx_REG (inner, reg + info.reg_words / 2),
3871 GEN_INT (GET_MODE_SIZE (inner)));
3872 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3873 }
3874
3875 if (!info.fpr_p)
3876 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3877 else if (info.reg_offset == 1)
3878 /* This code handles the special o32 case in which the second word
3879 of the argument structure is passed in floating-point registers. */
3880 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3881 else
3882 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3883 }
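/* Illustrative note, added for exposition only: for a leading
   _Complex double argument under n32/n64, the PARALLEL built above puts
   the real part in $f12 (FP_ARG_FIRST) and the imaginary part in the
   next FPR, so creal (z) arrives in $f12 and cimag (z) in $f13.  */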
3884
3885
3886 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3887
3888 static int
3889 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3890 enum machine_mode mode, tree type, bool named)
3891 {
3892 struct mips_arg_info info;
3893
3894 mips_arg_info (cum, mode, type, named, &info);
3895 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3896 }
3897
3898
3899 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3900 PARM_BOUNDARY bits of alignment, but will be given anything up
3901 to STACK_BOUNDARY bits if the type requires it. */
3902
3903 int
3904 function_arg_boundary (enum machine_mode mode, tree type)
3905 {
3906 unsigned int alignment;
3907
3908 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3909 if (alignment < PARM_BOUNDARY)
3910 alignment = PARM_BOUNDARY;
3911 if (alignment > STACK_BOUNDARY)
3912 alignment = STACK_BOUNDARY;
3913 return alignment;
3914 }
3915
3916 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3917 upward rather than downward. In other words, return true if the
3918 first byte of the stack slot has useful data, false if the last
3919 byte does. */
3920
3921 bool
3922 mips_pad_arg_upward (enum machine_mode mode, tree type)
3923 {
3924 /* On little-endian targets, the first byte of every stack argument
3925 is passed in the first byte of the stack slot. */
3926 if (!BYTES_BIG_ENDIAN)
3927 return true;
3928
3929 /* Otherwise, integral types are padded downward: the last byte of a
3930 stack argument is passed in the last byte of the stack slot. */
3931 if (type != 0
3932 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3933 : GET_MODE_CLASS (mode) == MODE_INT)
3934 return false;
3935
3936 /* Big-endian o64 pads floating-point arguments downward. */
3937 if (mips_abi == ABI_O64)
3938 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3939 return false;
3940
3941 /* Other types are padded upward for o32, o64, n32 and n64. */
3942 if (mips_abi != ABI_EABI)
3943 return true;
3944
3945 /* Arguments smaller than a stack slot are padded downward. */
3946 if (mode != BLKmode)
3947 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3948 else
3949 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3950 }
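/* Illustrative note, added for exposition only: on a big-endian o32
   target a short passed on the stack is therefore padded downward, so
   its two data bytes occupy the last two bytes of the four-byte argument
   slot; on little-endian targets every argument instead starts at the
   first byte of its slot.  */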
3951
3952
3953 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3954 if the least significant byte of the register has useful data. Return
3955 the opposite if the most significant byte does. */
3956
3957 bool
3958 mips_pad_reg_upward (enum machine_mode mode, tree type)
3959 {
3960 /* No shifting is required for floating-point arguments. */
3961 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3962 return !BYTES_BIG_ENDIAN;
3963
3964 /* Otherwise, apply the same padding to register arguments as we do
3965 to stack arguments. */
3966 return mips_pad_arg_upward (mode, type);
3967 }
3968 \f
3969 static void
3970 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3971 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3972 int no_rtl)
3973 {
3974 CUMULATIVE_ARGS local_cum;
3975 int gp_saved, fp_saved;
3976
3977 /* The caller has advanced CUM up to, but not beyond, the last named
3978 argument. Advance a local copy of CUM past the last "real" named
3979 argument, to find out how many registers are left over. */
3980
3981 local_cum = *cum;
3982 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3983
3984 /* Find out how many registers we need to save. */
3985 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3986 fp_saved = (EABI_FLOAT_VARARGS_P
3987 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
3988 : 0);
3989
3990 if (!no_rtl)
3991 {
3992 if (gp_saved > 0)
3993 {
3994 rtx ptr, mem;
3995
3996 ptr = plus_constant (virtual_incoming_args_rtx,
3997 REG_PARM_STACK_SPACE (cfun->decl)
3998 - gp_saved * UNITS_PER_WORD);
3999 mem = gen_rtx_MEM (BLKmode, ptr);
4000 set_mem_alias_set (mem, get_varargs_alias_set ());
4001
4002 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4003 mem, gp_saved);
4004 }
4005 if (fp_saved > 0)
4006 {
4007 /* We can't use move_block_from_reg, because it will use
4008 the wrong mode. */
4009 enum machine_mode mode;
4010 int off, i;
4011
4012 /* Set OFF to the offset from virtual_incoming_args_rtx of
4013 the first float register. The FP save area lies below
4014 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4015 off = -gp_saved * UNITS_PER_WORD;
4016 off &= ~(UNITS_PER_FPVALUE - 1);
4017 off -= fp_saved * UNITS_PER_FPREG;
4018
4019 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4020
4021 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4022 {
4023 rtx ptr, mem;
4024
4025 ptr = plus_constant (virtual_incoming_args_rtx, off);
4026 mem = gen_rtx_MEM (mode, ptr);
4027 set_mem_alias_set (mem, get_varargs_alias_set ());
4028 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4029 off += UNITS_PER_HWFPVALUE;
4030 }
4031 }
4032 }
4033 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4034 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4035 + fp_saved * UNITS_PER_FPREG);
4036 }
4037
4038 /* Create the va_list data type.
4039 We keep 3 pointers, and two offsets.
4040 Two pointers are to the overflow area, which starts at the CFA.
4041 One of these is constant, for addressing into the GPR save area below it.
4042 The other is advanced up the stack through the overflow region.
4043 The third pointer is to the GPR save area. Since the FPR save area
4044 is just below it, we can address FPR slots off this pointer.
4045 We also keep two one-byte offsets, which are to be subtracted from the
4046 constant pointers to yield addresses in the GPR and FPR save areas.
4047 These are downcounted as float or non-float arguments are used,
4048 and when they get to zero, the argument must be obtained from the
4049 overflow region.
4050 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4051 pointer is enough. It's started at the GPR save area, and is
4052 advanced, period.
4053 Note that the GPR save area is not constant size, due to optimization
4054 in the prologue. Hence, we can't use a design with two pointers
4055 and two offsets, although we could have designed this with two pointers
4056 and three offsets. */
4057
4058 static tree
4059 mips_build_builtin_va_list (void)
4060 {
4061 if (EABI_FLOAT_VARARGS_P)
4062 {
4063 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4064 tree array, index;
4065
4066 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4067
4068 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4069 ptr_type_node);
4070 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4071 ptr_type_node);
4072 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4073 ptr_type_node);
4074 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4075 unsigned_char_type_node);
4076 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4077 unsigned_char_type_node);
4078 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4079 warn on every user file. */
4080 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4081 array = build_array_type (unsigned_char_type_node,
4082 build_index_type (index));
4083 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4084
4085 DECL_FIELD_CONTEXT (f_ovfl) = record;
4086 DECL_FIELD_CONTEXT (f_gtop) = record;
4087 DECL_FIELD_CONTEXT (f_ftop) = record;
4088 DECL_FIELD_CONTEXT (f_goff) = record;
4089 DECL_FIELD_CONTEXT (f_foff) = record;
4090 DECL_FIELD_CONTEXT (f_res) = record;
4091
4092 TYPE_FIELDS (record) = f_ovfl;
4093 TREE_CHAIN (f_ovfl) = f_gtop;
4094 TREE_CHAIN (f_gtop) = f_ftop;
4095 TREE_CHAIN (f_ftop) = f_goff;
4096 TREE_CHAIN (f_goff) = f_foff;
4097 TREE_CHAIN (f_foff) = f_res;
4098
4099 layout_type (record);
4100 return record;
4101 }
4102 else if (TARGET_IRIX && TARGET_IRIX6)
4103 /* On IRIX 6, this type is 'char *'. */
4104 return build_pointer_type (char_type_node);
4105 else
4106 /* Otherwise, we use 'void *'. */
4107 return ptr_type_node;
4108 }
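/* For exposition only: the EABI va_list record built above corresponds
   roughly to the following C structure (field names as created above;
   the __reserved padding array is omitted):

       struct hypothetical_mips_eabi_va_list {
         void *__overflow_argptr;      next stack (overflow) argument
         void *__gpr_top;              top of the GPR save area
         void *__fpr_top;              top of the FPR save area
         unsigned char __gpr_offset;   bytes of GPR save area still unused
         unsigned char __fpr_offset;   bytes of FPR save area still unused
       };  */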
4109
4110 /* Implement va_start. */
4111
4112 void
4113 mips_va_start (tree valist, rtx nextarg)
4114 {
4115 if (EABI_FLOAT_VARARGS_P)
4116 {
4117 const CUMULATIVE_ARGS *cum;
4118 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4119 tree ovfl, gtop, ftop, goff, foff;
4120 tree t;
4121 int gpr_save_area_size;
4122 int fpr_save_area_size;
4123 int fpr_offset;
4124
4125 cum = &current_function_args_info;
4126 gpr_save_area_size
4127 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4128 fpr_save_area_size
4129 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4130
4131 f_ovfl = TYPE_FIELDS (va_list_type_node);
4132 f_gtop = TREE_CHAIN (f_ovfl);
4133 f_ftop = TREE_CHAIN (f_gtop);
4134 f_goff = TREE_CHAIN (f_ftop);
4135 f_foff = TREE_CHAIN (f_goff);
4136
4137 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4138 NULL_TREE);
4139 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4140 NULL_TREE);
4141 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4142 NULL_TREE);
4143 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4144 NULL_TREE);
4145 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4146 NULL_TREE);
4147
4148 /* Emit code to initialize OVFL, which points to the next varargs
4149 stack argument. CUM->STACK_WORDS gives the number of stack
4150 words used by named arguments. */
4151 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4152 if (cum->stack_words > 0)
4153 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4154 build_int_cst (NULL_TREE,
4155 cum->stack_words * UNITS_PER_WORD));
4156 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4157 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4158
4159 /* Emit code to initialize GTOP, the top of the GPR save area. */
4160 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4161 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4162 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4163
4164 /* Emit code to initialize FTOP, the top of the FPR save area.
4165 This address is gpr_save_area_bytes below GTOP, rounded
4166 down to the next fp-aligned boundary. */
4167 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4168 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4169 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4170 if (fpr_offset)
4171 t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4172 build_int_cst (NULL_TREE, -fpr_offset));
4173 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4174 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4175
4176 /* Emit code to initialize GOFF, the offset from GTOP of the
4177 next GPR argument. */
4178 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4179 build_int_cst (NULL_TREE, gpr_save_area_size));
4180 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4181
4182 /* Likewise emit code to initialize FOFF, the offset from FTOP
4183 of the next FPR argument. */
4184 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4185 build_int_cst (NULL_TREE, fpr_save_area_size));
4186 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4187 }
4188 else
4189 {
4190 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4191 std_expand_builtin_va_start (valist, nextarg);
4192 }
4193 }
4194 \f
4195 /* Implement va_arg. */
4196
4197 static tree
4198 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4199 {
4200 HOST_WIDE_INT size, rsize;
4201 tree addr;
4202 bool indirect;
4203
4204 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4205
4206 if (indirect)
4207 type = build_pointer_type (type);
4208
4209 size = int_size_in_bytes (type);
4210 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4211
4212 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4213 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4214 else
4215 {
4216 /* Not a simple merged stack. */
4217
4218 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4219 tree ovfl, top, off, align;
4220 HOST_WIDE_INT osize;
4221 tree t, u;
4222
4223 f_ovfl = TYPE_FIELDS (va_list_type_node);
4224 f_gtop = TREE_CHAIN (f_ovfl);
4225 f_ftop = TREE_CHAIN (f_gtop);
4226 f_goff = TREE_CHAIN (f_ftop);
4227 f_foff = TREE_CHAIN (f_goff);
4228
4229 /* We maintain separate pointers and offsets for floating-point
4230 and integer arguments, but we need similar code in both cases.
4231 Let:
4232
4233 TOP be the top of the register save area;
4234 OFF be the offset from TOP of the next register;
4235 ADDR_RTX be the address of the argument;
4236 RSIZE be the number of bytes used to store the argument
4237 when it's in the register save area;
4238 OSIZE be the number of bytes used to store it when it's
4239 in the stack overflow area; and
4240 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4241
4242 The code we want is:
4243
4244 1: off &= -rsize; // round down
4245 2: if (off != 0)
4246 3: {
4247 4: addr_rtx = top - off;
4248 5: off -= rsize;
4249 6: }
4250 7: else
4251 8: {
4252 9: ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
4253 10: addr_rtx = ovfl + PADDING;
4254 11: ovfl += osize;
4255 14: }
4256
4257 [1] and [9] can sometimes be optimized away. */
4258
4259 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4260 NULL_TREE);
4261
4262 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4263 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4264 {
4265 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4266 NULL_TREE);
4267 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4268 NULL_TREE);
4269
4270 /* When floating-point registers are saved to the stack,
4271 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4272 of the float's precision. */
4273 rsize = UNITS_PER_HWFPVALUE;
4274
4275 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4276 (= PARM_BOUNDARY bits). This can be different from RSIZE
4277 in two cases:
4278
4279 (1) On 32-bit targets when TYPE is a structure such as:
4280
4281 struct s { float f; };
4282
4283 Such structures are passed in paired FPRs, so RSIZE
4284 will be 8 bytes. However, the structure only takes
4285 up 4 bytes of memory, so OSIZE will only be 4.
4286
4287 (2) In combinations such as -mgp64 -msingle-float
4288 -fshort-double. Doubles passed in registers
4289 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4290 but those passed on the stack take up
4291 UNITS_PER_WORD bytes. */
4292 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4293 }
4294 else
4295 {
4296 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4297 NULL_TREE);
4298 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4299 NULL_TREE);
4300 if (rsize > UNITS_PER_WORD)
4301 {
4302 /* [1] Emit code for: off &= -rsize. */
4303 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4304 build_int_cst (NULL_TREE, -rsize));
4305 t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
4306 gimplify_and_add (t, pre_p);
4307 }
4308 osize = rsize;
4309 }
4310
4311 /* [2] Emit code to branch if off == 0. */
4312 t = build2 (NE_EXPR, boolean_type_node, off,
4313 build_int_cst (TREE_TYPE (off), 0));
4314 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4315
4316 /* [5] Emit code for: off -= rsize. We do this as a form of
4317 post-increment not available to C. Also widen for the
4318 coming pointer arithmetic. */
4319 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4320 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4321 t = fold_convert (sizetype, t);
4322 t = fold_convert (TREE_TYPE (top), t);
4323
4324 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4325 the argument has RSIZE - SIZE bytes of leading padding. */
4326 t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4327 if (BYTES_BIG_ENDIAN && rsize > size)
4328 {
4329 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4330 rsize - size));
4331 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4332 }
4333 COND_EXPR_THEN (addr) = t;
4334
4335 if (osize > UNITS_PER_WORD)
4336 {
4337 /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize. */
4338 u = fold_convert (TREE_TYPE (ovfl),
4339 build_int_cst (NULL_TREE, osize - 1));
4340 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4341 u = fold_convert (TREE_TYPE (ovfl),
4342 build_int_cst (NULL_TREE, -osize));
4343 t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4344 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4345 }
4346 else
4347 align = NULL;
4348
4349 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4350 post-increment ovfl by osize. On big-endian machines,
4351 the argument has OSIZE - SIZE bytes of leading padding. */
4352 u = fold_convert (TREE_TYPE (ovfl),
4353 build_int_cst (NULL_TREE, osize));
4354 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4355 if (BYTES_BIG_ENDIAN && osize > size)
4356 {
4357 u = fold_convert (TREE_TYPE (t),
4358 build_int_cst (NULL_TREE, osize - size));
4359 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4360 }
4361
4362 /* String [9] and [10,11] together. */
4363 if (align)
4364 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4365 COND_EXPR_ELSE (addr) = t;
4366
4367 addr = fold_convert (build_pointer_type (type), addr);
4368 addr = build_va_arg_indirect_ref (addr);
4369 }
4370
4371 if (indirect)
4372 addr = build_va_arg_indirect_ref (addr);
4373
4374 return addr;
4375 }
4376 \f
4377 /* Return true if it is possible to use left/right accesses for a
4378 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4379 returning true, update *OP, *LEFT and *RIGHT as follows:
4380
4381 *OP is a BLKmode reference to the whole field.
4382
4383 *LEFT is a QImode reference to the first byte if big endian or
4384 the last byte if little endian. This address can be used in the
4385 left-side instructions (lwl, swl, ldl, sdl).
4386
4387 *RIGHT is a QImode reference to the opposite end of the field and
4388 can be used in the partnering right-side instruction. */
4389
4390 static bool
4391 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4392 rtx *left, rtx *right)
4393 {
4394 rtx first, last;
4395
4396 /* Check that the operand really is a MEM. Not all the extv and
4397 extzv predicates are checked. */
4398 if (!MEM_P (*op))
4399 return false;
4400
4401 /* Check that the size is valid. */
4402 if (width != 32 && (!TARGET_64BIT || width != 64))
4403 return false;
4404
4405 /* We can only access byte-aligned values. Since we are always passed
4406 a reference to the first byte of the field, it is not necessary to
4407 do anything with BITPOS after this check. */
4408 if (bitpos % BITS_PER_UNIT != 0)
4409 return false;
4410
4411 /* Reject aligned bitfields: we want to use a normal load or store
4412 instead of a left/right pair. */
4413 if (MEM_ALIGN (*op) >= width)
4414 return false;
4415
4416 /* Adjust *OP to refer to the whole field. This also has the effect
4417 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4418 *op = adjust_address (*op, BLKmode, 0);
4419 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4420
4421 /* Get references to both ends of the field. We deliberately don't
4422 use the original QImode *OP for FIRST since the new BLKmode one
4423 might have a simpler address. */
4424 first = adjust_address (*op, QImode, 0);
4425 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4426
4427 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4428 be the upper word and RIGHT the lower word. */
4429 if (TARGET_BIG_ENDIAN)
4430 *left = first, *right = last;
4431 else
4432 *left = last, *right = first;
4433
4434 return true;
4435 }
4436
4437
4438 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4439 Return true on success. We only handle cases where zero_extract is
4440 equivalent to sign_extract. */
4441
4442 bool
4443 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4444 {
4445 rtx left, right, temp;
4446
4447 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4448 paradoxical word_mode subreg. This is the only case in which
4449 we allow the destination to be larger than the source. */
4450 if (GET_CODE (dest) == SUBREG
4451 && GET_MODE (dest) == DImode
4452 && SUBREG_BYTE (dest) == 0
4453 && GET_MODE (SUBREG_REG (dest)) == SImode)
4454 dest = SUBREG_REG (dest);
4455
4456 /* After the above adjustment, the destination must be the same
4457 width as the source. */
4458 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4459 return false;
4460
4461 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4462 return false;
4463
4464 temp = gen_reg_rtx (GET_MODE (dest));
4465 if (GET_MODE (dest) == DImode)
4466 {
4467 emit_insn (gen_mov_ldl (temp, src, left));
4468 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4469 }
4470 else
4471 {
4472 emit_insn (gen_mov_lwl (temp, src, left));
4473 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4474 }
4475 return true;
4476 }
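/* Illustrative note, added for exposition only: on a big-endian target
   the sequence emitted above for an unaligned 32-bit load from 0($a0)
   typically assembles to

       lwl     $v0, 0($a0)
       lwr     $v0, 3($a0)

   with the byte offsets swapped on little-endian targets; the 64-bit
   path uses ldl/ldr in the same way.  */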
4477
4478
4479 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4480 true on success. */
4481
4482 bool
4483 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4484 {
4485 rtx left, right;
4486
4487 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4488 return false;
4489
4490 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
4491
4492 if (GET_MODE (src) == DImode)
4493 {
4494 emit_insn (gen_mov_sdl (dest, src, left));
4495 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4496 }
4497 else
4498 {
4499 emit_insn (gen_mov_swl (dest, src, left));
4500 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4501 }
4502 return true;
4503 }
4504
4505 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4506 source of an "ext" instruction or the destination of an "ins"
4507 instruction. OP must be a register operand and the following
4508 conditions must hold:
4509
4510 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4511 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4512 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4513
4514 Also reject lengths equal to a word as they are better handled
4515 by the move patterns. */
4516
4517 bool
4518 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4519 {
4520 HOST_WIDE_INT len, pos;
4521
4522 if (!ISA_HAS_EXT_INS
4523 || !register_operand (op, VOIDmode)
4524 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4525 return false;
4526
4527 len = INTVAL (size);
4528 pos = INTVAL (position);
4529
4530 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4531 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4532 return false;
4533
4534 return true;
4535 }
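/* Illustrative note, added for exposition only: when this predicate
   accepts an operand, the md patterns can use the revision-2 bitfield
   instructions, for example

       ext     $v0, $a0, 4, 8        extract 8 bits starting at bit 4
       ins     $v0, $a0, 4, 8        insert 8 bits starting at bit 4

   instead of the usual shift-and-mask sequences.  */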
4536
4537 /* Set up globals to generate code for the ISA or processor
4538 described by INFO. */
4539
4540 static void
4541 mips_set_architecture (const struct mips_cpu_info *info)
4542 {
4543 if (info != 0)
4544 {
4545 mips_arch_info = info;
4546 mips_arch = info->cpu;
4547 mips_isa = info->isa;
4548 }
4549 }
4550
4551
4552 /* Likewise for tuning. */
4553
4554 static void
4555 mips_set_tune (const struct mips_cpu_info *info)
4556 {
4557 if (info != 0)
4558 {
4559 mips_tune_info = info;
4560 mips_tune = info->cpu;
4561 }
4562 }
4563
4564 /* Implement TARGET_HANDLE_OPTION. */
4565
4566 static bool
4567 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4568 {
4569 switch (code)
4570 {
4571 case OPT_mabi_:
4572 if (strcmp (arg, "32") == 0)
4573 mips_abi = ABI_32;
4574 else if (strcmp (arg, "o64") == 0)
4575 mips_abi = ABI_O64;
4576 else if (strcmp (arg, "n32") == 0)
4577 mips_abi = ABI_N32;
4578 else if (strcmp (arg, "64") == 0)
4579 mips_abi = ABI_64;
4580 else if (strcmp (arg, "eabi") == 0)
4581 mips_abi = ABI_EABI;
4582 else
4583 return false;
4584 return true;
4585
4586 case OPT_march_:
4587 case OPT_mtune_:
4588 return mips_parse_cpu (arg) != 0;
4589
4590 case OPT_mips:
4591 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4592 return mips_isa_info != 0;
4593
4594 case OPT_mno_flush_func:
4595 mips_cache_flush_func = NULL;
4596 return true;
4597
4598 default:
4599 return true;
4600 }
4601 }
4602
4603 /* Set up the threshold for data to go into the small data area, instead
4604 of the normal data area, and detect any conflicts in the switches. */
4605
4606 void
4607 override_options (void)
4608 {
4609 int i, start, regno;
4610 enum machine_mode mode;
4611
4612 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4613
4614 /* The following code determines the architecture and register size.
4615 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4616 The GAS and GCC code should be kept in sync as much as possible. */
4617
4618 if (mips_arch_string != 0)
4619 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4620
4621 if (mips_isa_info != 0)
4622 {
4623 if (mips_arch_info == 0)
4624 mips_set_architecture (mips_isa_info);
4625 else if (mips_arch_info->isa != mips_isa_info->isa)
4626 error ("-%s conflicts with the other architecture options, "
4627 "which specify a %s processor",
4628 mips_isa_info->name,
4629 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4630 }
4631
4632 if (mips_arch_info == 0)
4633 {
4634 #ifdef MIPS_CPU_STRING_DEFAULT
4635 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4636 #else
4637 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4638 #endif
4639 }
4640
4641 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4642 error ("-march=%s is not compatible with the selected ABI",
4643 mips_arch_info->name);
4644
4645 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4646 if (mips_tune_string != 0)
4647 mips_set_tune (mips_parse_cpu (mips_tune_string));
4648
4649 if (mips_tune_info == 0)
4650 mips_set_tune (mips_arch_info);
4651
4652 /* Set cost structure for the processor. */
4653 mips_cost = &mips_rtx_cost_data[mips_tune];
4654
4655 if ((target_flags_explicit & MASK_64BIT) != 0)
4656 {
4657 /* The user specified the size of the integer registers. Make sure
4658 it agrees with the ABI and ISA. */
4659 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4660 error ("-mgp64 used with a 32-bit processor");
4661 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4662 error ("-mgp32 used with a 64-bit ABI");
4663 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4664 error ("-mgp64 used with a 32-bit ABI");
4665 }
4666 else
4667 {
4668 /* Infer the integer register size from the ABI and processor.
4669 Restrict ourselves to 32-bit registers if that's all the
4670 processor has, or if the ABI cannot handle 64-bit registers. */
4671 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4672 target_flags &= ~MASK_64BIT;
4673 else
4674 target_flags |= MASK_64BIT;
4675 }
4676
4677 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4678 {
4679 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4680 only one right answer here. */
4681 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4682 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4683 else if (!TARGET_64BIT && TARGET_FLOAT64)
4684 error ("unsupported combination: %s", "-mgp32 -mfp64");
4685 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4686 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4687 }
4688 else
4689 {
4690 /* -msingle-float selects 32-bit float registers. Otherwise the
4691 float registers should be the same size as the integer ones. */
4692 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4693 target_flags |= MASK_FLOAT64;
4694 else
4695 target_flags &= ~MASK_FLOAT64;
4696 }
4697
4698 /* End of code shared with GAS. */
4699
4700 if ((target_flags_explicit & MASK_LONG64) == 0)
4701 {
4702 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4703 target_flags |= MASK_LONG64;
4704 else
4705 target_flags &= ~MASK_LONG64;
4706 }
4707
4708 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4709 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4710 {
4711 /* For some configurations, it is useful to have -march control
4712 the default setting of MASK_SOFT_FLOAT. */
4713 switch ((int) mips_arch)
4714 {
4715 case PROCESSOR_R4100:
4716 case PROCESSOR_R4111:
4717 case PROCESSOR_R4120:
4718 case PROCESSOR_R4130:
4719 target_flags |= MASK_SOFT_FLOAT;
4720 break;
4721
4722 default:
4723 target_flags &= ~MASK_SOFT_FLOAT;
4724 break;
4725 }
4726 }
4727
4728 if (!TARGET_OLDABI)
4729 flag_pcc_struct_return = 0;
4730
4731 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4732 {
4733 /* If neither -mbranch-likely nor -mno-branch-likely was given
4734 on the command line, set MASK_BRANCHLIKELY based on the target
4735 architecture.
4736
4737 By default, we enable use of Branch Likely instructions on
4738 all architectures which support them with the following
4739 exceptions: when creating MIPS32 or MIPS64 code, and when
4740 tuning for architectures where their use tends to hurt
4741 performance.
4742
4743 The MIPS32 and MIPS64 architecture specifications say "Software
4744 is strongly encouraged to avoid use of Branch Likely
4745 instructions, as they will be removed from a future revision
4746 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4747 issue those instructions unless instructed to do so by
4748 -mbranch-likely. */
4749 if (ISA_HAS_BRANCHLIKELY
4750 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4751 && !(TUNE_MIPS5500 || TUNE_SB1))
4752 target_flags |= MASK_BRANCHLIKELY;
4753 else
4754 target_flags &= ~MASK_BRANCHLIKELY;
4755 }
4756 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4757 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4758
4759 /* The effect of -mabicalls isn't defined for the EABI. */
4760 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4761 {
4762 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4763 target_flags &= ~MASK_ABICALLS;
4764 }
4765
4766 if (TARGET_ABICALLS)
4767 {
4768 /* We need to set flag_pic for executables as well as DSOs
4769 because we may reference symbols that are not defined in
4770 the final executable. (MIPS does not use things like
4771 copy relocs, for example.)
4772
4773 Also, there is a body of code that uses __PIC__ to distinguish
4774 between -mabicalls and -mno-abicalls code. */
4775 flag_pic = 1;
4776 if (mips_section_threshold > 0)
4777 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4778 }
4779
4780 /* mips_split_addresses is a half-way house between explicit
4781 relocations and the traditional assembler macros. It can
4782 split absolute 32-bit symbolic constants into a high/lo_sum
4783 pair but uses macros for other sorts of access.
4784
4785 Like explicit relocation support for REL targets, it relies
4786 on GNU extensions in the assembler and the linker.
4787
4788 Although this code should work for -O0, it has traditionally
4789 been treated as an optimization. */
4790 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4791 && optimize && !flag_pic
4792 && !ABI_HAS_64BIT_SYMBOLS)
4793 mips_split_addresses = 1;
4794 else
4795 mips_split_addresses = 0;
4796
4797 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4798 faster code, but at the expense of more nops. Enable it at -O3 and
4799 above. */
4800 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4801 target_flags |= MASK_VR4130_ALIGN;
4802
4803 /* When compiling for the mips16, we cannot use floating point. We
4804 record the original hard float value in mips16_hard_float. */
4805 if (TARGET_MIPS16)
4806 {
4807 if (TARGET_SOFT_FLOAT)
4808 mips16_hard_float = 0;
4809 else
4810 mips16_hard_float = 1;
4811 target_flags |= MASK_SOFT_FLOAT;
4812
4813 /* Don't run the scheduler before reload, since it tends to
4814 increase register pressure. */
4815 flag_schedule_insns = 0;
4816
4817 /* Don't do hot/cold partitioning. The constant layout code expects
4818 the whole function to be in a single section. */
4819 flag_reorder_blocks_and_partition = 0;
4820
4821 /* Silently disable -mexplicit-relocs since it doesn't apply
4822 to mips16 code. Even so, it would be overly pedantic to warn
4823 about "-mips16 -mexplicit-relocs", especially given that
4824 we use a %gprel() operator. */
4825 target_flags &= ~MASK_EXPLICIT_RELOCS;
4826 }
4827
4828 /* When using explicit relocs, we call dbr_schedule from within
4829 mips_reorg. */
4830 if (TARGET_EXPLICIT_RELOCS)
4831 {
4832 mips_flag_delayed_branch = flag_delayed_branch;
4833 flag_delayed_branch = 0;
4834 }
4835
4836 #ifdef MIPS_TFMODE_FORMAT
4837 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4838 #endif
4839
4840 /* Make sure that the user didn't turn off paired single support when
4841 MIPS-3D support is requested. */
4842 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4843 && !TARGET_PAIRED_SINGLE_FLOAT)
4844 error ("-mips3d requires -mpaired-single");
4845
4846 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4847 if (TARGET_MIPS3D)
4848 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4849
4850 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4851 and TARGET_HARD_FLOAT are both true. */
4852 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4853 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4854
4855 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4856 enabled. */
4857 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4858 error ("-mips3d/-mpaired-single must be used with -mips64");
4859
4860 if (TARGET_MIPS16 && TARGET_DSP)
4861 error ("-mips16 and -mdsp cannot be used together");
4862
4863 mips_print_operand_punct['?'] = 1;
4864 mips_print_operand_punct['#'] = 1;
4865 mips_print_operand_punct['/'] = 1;
4866 mips_print_operand_punct['&'] = 1;
4867 mips_print_operand_punct['!'] = 1;
4868 mips_print_operand_punct['*'] = 1;
4869 mips_print_operand_punct['@'] = 1;
4870 mips_print_operand_punct['.'] = 1;
4871 mips_print_operand_punct['('] = 1;
4872 mips_print_operand_punct[')'] = 1;
4873 mips_print_operand_punct['['] = 1;
4874 mips_print_operand_punct[']'] = 1;
4875 mips_print_operand_punct['<'] = 1;
4876 mips_print_operand_punct['>'] = 1;
4877 mips_print_operand_punct['{'] = 1;
4878 mips_print_operand_punct['}'] = 1;
4879 mips_print_operand_punct['^'] = 1;
4880 mips_print_operand_punct['$'] = 1;
4881 mips_print_operand_punct['+'] = 1;
4882 mips_print_operand_punct['~'] = 1;
4883
4884 /* Set up array to map GCC register number to debug register number.
4885 Ignore the special purpose register numbers. */
4886
4887 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4888 mips_dbx_regno[i] = -1;
4889
4890 start = GP_DBX_FIRST - GP_REG_FIRST;
4891 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4892 mips_dbx_regno[i] = i + start;
4893
4894 start = FP_DBX_FIRST - FP_REG_FIRST;
4895 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4896 mips_dbx_regno[i] = i + start;
4897
4898 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4899 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4900
4901 /* Set up array giving whether a given register can hold a given mode. */
4902
4903 for (mode = VOIDmode;
4904 mode != MAX_MACHINE_MODE;
4905 mode = (enum machine_mode) ((int)mode + 1))
4906 {
4907 register int size = GET_MODE_SIZE (mode);
4908 register enum mode_class class = GET_MODE_CLASS (mode);
4909
4910 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4911 {
4912 register int temp;
4913
4914 if (mode == CCV2mode)
4915 temp = (ISA_HAS_8CC
4916 && ST_REG_P (regno)
4917 && (regno - ST_REG_FIRST) % 2 == 0);
4918
4919 else if (mode == CCV4mode)
4920 temp = (ISA_HAS_8CC
4921 && ST_REG_P (regno)
4922 && (regno - ST_REG_FIRST) % 4 == 0);
4923
4924 else if (mode == CCmode)
4925 {
4926 if (! ISA_HAS_8CC)
4927 temp = (regno == FPSW_REGNUM);
4928 else
4929 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4930 || FP_REG_P (regno));
4931 }
4932
4933 else if (GP_REG_P (regno))
4934 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4935
4936 else if (FP_REG_P (regno))
4937 temp = ((regno % FP_INC) == 0)
4938 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4939 || class == MODE_VECTOR_FLOAT)
4940 && size <= UNITS_PER_FPVALUE)
4941 /* Allow integer modes that fit into a single
4942 register. We need to put integers into FPRs
4943 when using instructions like cvt and trunc.
4944 We can't allow sizes smaller than a word,
4945 the FPU has no appropriate load/store
4946 instructions for those. */
4947 || (class == MODE_INT
4948 && size >= MIN_UNITS_PER_WORD
4949 && size <= UNITS_PER_FPREG)
4950 /* Allow TFmode for CCmode reloads. */
4951 || (ISA_HAS_8CC && mode == TFmode));
4952
4953 else if (ACC_REG_P (regno))
4954 temp = (INTEGRAL_MODE_P (mode)
4955 && (size <= UNITS_PER_WORD
4956 || (ACC_HI_REG_P (regno)
4957 && size == 2 * UNITS_PER_WORD)));
4958
4959 else if (ALL_COP_REG_P (regno))
4960 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4961 else
4962 temp = 0;
4963
4964 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4965 }
4966 }
4967
4968 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4969 initialized yet, so we can't use that here. */
4970 gpr_mode = TARGET_64BIT ? DImode : SImode;
4971
4972 /* Provide default values for align_* for 64-bit targets. */
4973 if (TARGET_64BIT && !TARGET_MIPS16)
4974 {
4975 if (align_loops == 0)
4976 align_loops = 8;
4977 if (align_jumps == 0)
4978 align_jumps = 8;
4979 if (align_functions == 0)
4980 align_functions = 8;
4981 }
4982
4983 /* Function to allocate machine-dependent function status. */
4984 init_machine_status = &mips_init_machine_status;
4985
4986 if (ABI_HAS_64BIT_SYMBOLS)
4987 {
4988 if (TARGET_EXPLICIT_RELOCS)
4989 {
4990 mips_split_p[SYMBOL_64_HIGH] = true;
4991 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4992 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4993
4994 mips_split_p[SYMBOL_64_MID] = true;
4995 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4996 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4997
4998 mips_split_p[SYMBOL_64_LOW] = true;
4999 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5000 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5001
5002 mips_split_p[SYMBOL_GENERAL] = true;
5003 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5004 }
5005 }
5006 else
5007 {
5008 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5009 {
5010 mips_split_p[SYMBOL_GENERAL] = true;
5011 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5012 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5013 }
5014 }
5015
5016 if (TARGET_MIPS16)
5017 {
5018 /* The high part is provided by a pseudo copy of $gp. */
5019 mips_split_p[SYMBOL_SMALL_DATA] = true;
5020 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5021 }
5022
5023 if (TARGET_EXPLICIT_RELOCS)
5024 {
5025 /* Small data constants are kept whole until after reload,
5026 then lowered by mips_rewrite_small_data. */
5027 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5028
5029 mips_split_p[SYMBOL_GOT_LOCAL] = true;
5030 if (TARGET_NEWABI)
5031 {
5032 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5033 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
5034 }
5035 else
5036 {
5037 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5038 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
5039 }
5040
5041 if (TARGET_XGOT)
5042 {
5043 /* The HIGH and LO_SUM are matched by special .md patterns. */
5044 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
5045
5046 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
5047 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
5048 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
5049
5050 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5051 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5052 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5053 }
5054 else
5055 {
5056 if (TARGET_NEWABI)
5057 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
5058 else
5059 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
5060 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5061 }
5062 }
5063
5064 if (TARGET_NEWABI)
5065 {
5066 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5067 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5068 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5069 }
5070
5071 /* Thread-local relocation operators. */
5072 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5073 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5074 mips_split_p[SYMBOL_DTPREL] = 1;
5075 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5076 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5077 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5078 mips_split_p[SYMBOL_TPREL] = 1;
5079 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5080 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5081
5082 /* We don't have a thread pointer access instruction on MIPS16, or
5083 appropriate TLS relocations. */
5084 if (TARGET_MIPS16)
5085 targetm.have_tls = false;
5086
5087 /* Default to working around R4000 errata only if the processor
5088 was selected explicitly. */
5089 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5090 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5091 target_flags |= MASK_FIX_R4000;
5092
5093 /* Default to working around R4400 errata only if the processor
5094 was selected explicitly. */
5095 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5096 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5097 target_flags |= MASK_FIX_R4400;
5098 }
5099
5100 /* Implement CONDITIONAL_REGISTER_USAGE. */
5101
5102 void
5103 mips_conditional_register_usage (void)
5104 {
5105 if (!TARGET_DSP)
5106 {
5107 int regno;
5108
5109 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5110 fixed_regs[regno] = call_used_regs[regno] = 1;
5111 }
5112 if (!TARGET_HARD_FLOAT)
5113 {
5114 int regno;
5115
5116 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5117 fixed_regs[regno] = call_used_regs[regno] = 1;
5118 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5119 fixed_regs[regno] = call_used_regs[regno] = 1;
5120 }
5121 else if (! ISA_HAS_8CC)
5122 {
5123 int regno;
5124
5125 /* We only have a single condition code register. We
5126 implement this by hiding all the condition code registers,
5127 and generating RTL that refers directly to ST_REG_FIRST. */
5128 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5129 fixed_regs[regno] = call_used_regs[regno] = 1;
5130 }
5131 /* In mips16 mode, we permit the $t temporary registers to be used
5132 for reload. We prohibit the unused $s registers, since they
5133 are call-saved, and saving them via a mips16 register would
5134 probably waste more time than just reloading the value. */
5135 if (TARGET_MIPS16)
5136 {
5137 fixed_regs[18] = call_used_regs[18] = 1;
5138 fixed_regs[19] = call_used_regs[19] = 1;
5139 fixed_regs[20] = call_used_regs[20] = 1;
5140 fixed_regs[21] = call_used_regs[21] = 1;
5141 fixed_regs[22] = call_used_regs[22] = 1;
5142 fixed_regs[23] = call_used_regs[23] = 1;
5143 fixed_regs[26] = call_used_regs[26] = 1;
5144 fixed_regs[27] = call_used_regs[27] = 1;
5145 fixed_regs[30] = call_used_regs[30] = 1;
5146 }
5147 /* fp20-23 are now caller saved. */
5148 if (mips_abi == ABI_64)
5149 {
5150 int regno;
5151 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5152 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5153 }
5154 /* Odd registers from fp21 to fp31 are now caller saved. */
5155 if (mips_abi == ABI_N32)
5156 {
5157 int regno;
5158 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5159 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5160 }
5161 }
5162
5163 /* Allocate a chunk of memory for per-function machine-dependent data. */
5164 static struct machine_function *
5165 mips_init_machine_status (void)
5166 {
5167 return ((struct machine_function *)
5168 ggc_alloc_cleared (sizeof (struct machine_function)));
5169 }
5170
5171 /* On the mips16, we want to allocate $24 (T_REG) before other
5172 registers for instructions for which it is possible. This helps
5173 avoid shuffling registers around in order to set up for an xor,
5174 encouraging the compiler to use a cmp instead. */
5175
5176 void
5177 mips_order_regs_for_local_alloc (void)
5178 {
5179 register int i;
5180
5181 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5182 reg_alloc_order[i] = i;
5183
5184 if (TARGET_MIPS16)
5185 {
5186 /* It really doesn't matter where we put register 0, since it is
5187 a fixed register anyhow. */
5188 reg_alloc_order[0] = 24;
5189 reg_alloc_order[24] = 0;
5190 }
5191 }
5192
5193 \f
5194 /* The MIPS debug format wants all automatic variables and arguments
5195 to be in terms of the virtual frame pointer (stack pointer before
5196 any adjustment in the function), while the MIPS 3.0 linker wants
5197 the frame pointer to be the stack pointer after the initial
5198 adjustment. So, we do the adjustment here. The arg pointer (which
5199 is eliminated) points to the virtual frame pointer, while the frame
5200 pointer (which may be eliminated) points to the stack pointer after
5201 the initial adjustments. */
5202
5203 HOST_WIDE_INT
5204 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5205 {
5206 rtx offset2 = const0_rtx;
5207 rtx reg = eliminate_constant_term (addr, &offset2);
5208
5209 if (offset == 0)
5210 offset = INTVAL (offset2);
5211
5212 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5213 || reg == hard_frame_pointer_rtx)
5214 {
5215 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5216 ? compute_frame_size (get_frame_size ())
5217 : cfun->machine->frame.total_size;
5218
5219 /* The MIPS16 frame is smaller. */
5220 if (frame_pointer_needed && TARGET_MIPS16)
5221 frame_size -= cfun->machine->frame.args_size;
5222
5223 offset = offset - frame_size;
5224 }
5225
5226 /* sdbout_parms does not want this to crash for unrecognized cases. */
5227 #if 0
5228 else if (reg != arg_pointer_rtx)
5229 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5230 addr);
5231 #endif
5232
5233 return offset;
5234 }
5235 \f
5236 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5237
5238 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5239 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5240 'h' OP is HIGH, prints %hi(X),
5241 'd' output integer constant in decimal,
5242 'z' if the operand is 0, use $0 instead of normal operand.
5243 'D' print second part of double-word register or memory operand.
5244 'L' print low-order register of double-word register operand.
5245 'M' print high-order register of double-word register operand.
5246 'C' print part of opcode for a branch condition.
5247 'F' print part of opcode for a floating-point branch condition.
5248 'N' print part of opcode for a branch condition, inverted.
5249 'W' print part of opcode for a floating-point branch condition, inverted.
5250 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5251 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5252 't' like 'T', but with the EQ/NE cases reversed
5253 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5254 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5255 'R' print the reloc associated with LO_SUM
5256 'q' print DSP accumulator registers
5257
5258 The punctuation characters are:
5259
5260 '(' Turn on .set noreorder
5261 ')' Turn on .set reorder
5262 '[' Turn on .set noat
5263 ']' Turn on .set at
5264 '<' Turn on .set nomacro
5265 '>' Turn on .set macro
5266 '{' Turn on .set volatile (not GAS)
5267 '}' Turn on .set novolatile (not GAS)
5268 '&' Turn on .set noreorder if filling delay slots
5269 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5270 '!' Turn on .set nomacro if filling delay slots
5271 '#' Print nop if in a .set noreorder section.
5272 '/' Like '#', but does nothing within a delayed branch sequence
5273 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5274 '@' Print the name of the assembler temporary register (at or $1).
5275 '.' Print the name of the register with a hard-wired zero (zero or $0).
5276 '^' Print the name of the pic call-through register (t9 or $25).
5277 '$' Print the name of the stack pointer register (sp or $29).
5278 '+' Print the name of the gp register (usually gp or $28).
5279 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
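/* Illustrative (hypothetical) template using the codes above:
   "%*beq%?\t%z1,%.,%0" turns on .set noreorder and .set nomacro when
   a delay slot is being filled ('%*'), prints "beql" when a
   branch-likely is wanted ('%?'), prints operand 1 as "$0" when it is
   the constant zero ('%z1'), and prints the hard-wired zero register
   for '%.'. */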
5280
5281 void
5282 print_operand (FILE *file, rtx op, int letter)
5283 {
5284 register enum rtx_code code;
5285
5286 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5287 {
5288 switch (letter)
5289 {
5290 case '?':
5291 if (mips_branch_likely)
5292 putc ('l', file);
5293 break;
5294
5295 case '@':
5296 fputs (reg_names [GP_REG_FIRST + 1], file);
5297 break;
5298
5299 case '^':
5300 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5301 break;
5302
5303 case '.':
5304 fputs (reg_names [GP_REG_FIRST + 0], file);
5305 break;
5306
5307 case '$':
5308 fputs (reg_names[STACK_POINTER_REGNUM], file);
5309 break;
5310
5311 case '+':
5312 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5313 break;
5314
5315 case '&':
5316 if (final_sequence != 0 && set_noreorder++ == 0)
5317 fputs (".set\tnoreorder\n\t", file);
5318 break;
5319
5320 case '*':
5321 if (final_sequence != 0)
5322 {
5323 if (set_noreorder++ == 0)
5324 fputs (".set\tnoreorder\n\t", file);
5325
5326 if (set_nomacro++ == 0)
5327 fputs (".set\tnomacro\n\t", file);
5328 }
5329 break;
5330
5331 case '!':
5332 if (final_sequence != 0 && set_nomacro++ == 0)
5333 fputs ("\n\t.set\tnomacro", file);
5334 break;
5335
5336 case '#':
5337 if (set_noreorder != 0)
5338 fputs ("\n\tnop", file);
5339 break;
5340
5341 case '/':
5342 /* Print an extra newline so that the delayed insn is separated
5343 from the following ones. This looks neater and is consistent
5344 with non-nop delayed sequences. */
5345 if (set_noreorder != 0 && final_sequence == 0)
5346 fputs ("\n\tnop\n", file);
5347 break;
5348
5349 case '(':
5350 if (set_noreorder++ == 0)
5351 fputs (".set\tnoreorder\n\t", file);
5352 break;
5353
5354 case ')':
5355 if (set_noreorder == 0)
5356 error ("internal error: %%) found without a %%( in assembler pattern");
5357
5358 else if (--set_noreorder == 0)
5359 fputs ("\n\t.set\treorder", file);
5360
5361 break;
5362
5363 case '[':
5364 if (set_noat++ == 0)
5365 fputs (".set\tnoat\n\t", file);
5366 break;
5367
5368 case ']':
5369 if (set_noat == 0)
5370 error ("internal error: %%] found without a %%[ in assembler pattern");
5371 else if (--set_noat == 0)
5372 fputs ("\n\t.set\tat", file);
5373
5374 break;
5375
5376 case '<':
5377 if (set_nomacro++ == 0)
5378 fputs (".set\tnomacro\n\t", file);
5379 break;
5380
5381 case '>':
5382 if (set_nomacro == 0)
5383 error ("internal error: %%> found without a %%< in assembler pattern");
5384 else if (--set_nomacro == 0)
5385 fputs ("\n\t.set\tmacro", file);
5386
5387 break;
5388
5389 case '{':
5390 if (set_volatile++ == 0)
5391 fputs ("#.set\tvolatile\n\t", file);
5392 break;
5393
5394 case '}':
5395 if (set_volatile == 0)
5396 error ("internal error: %%} found without a %%{ in assembler pattern");
5397 else if (--set_volatile == 0)
5398 fputs ("\n\t#.set\tnovolatile", file);
5399
5400 break;
5401
5402 case '~':
5403 {
5404 if (align_labels_log > 0)
5405 ASM_OUTPUT_ALIGN (file, align_labels_log);
5406 }
5407 break;
5408
5409 default:
5410 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5411 break;
5412 }
5413
5414 return;
5415 }
5416
5417 if (! op)
5418 {
5419 error ("PRINT_OPERAND null pointer");
5420 return;
5421 }
5422
5423 code = GET_CODE (op);
5424
5425 if (letter == 'C')
5426 switch (code)
5427 {
5428 case EQ: fputs ("eq", file); break;
5429 case NE: fputs ("ne", file); break;
5430 case GT: fputs ("gt", file); break;
5431 case GE: fputs ("ge", file); break;
5432 case LT: fputs ("lt", file); break;
5433 case LE: fputs ("le", file); break;
5434 case GTU: fputs ("gtu", file); break;
5435 case GEU: fputs ("geu", file); break;
5436 case LTU: fputs ("ltu", file); break;
5437 case LEU: fputs ("leu", file); break;
5438 default:
5439 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5440 }
5441
5442 else if (letter == 'N')
5443 switch (code)
5444 {
5445 case EQ: fputs ("ne", file); break;
5446 case NE: fputs ("eq", file); break;
5447 case GT: fputs ("le", file); break;
5448 case GE: fputs ("lt", file); break;
5449 case LT: fputs ("ge", file); break;
5450 case LE: fputs ("gt", file); break;
5451 case GTU: fputs ("leu", file); break;
5452 case GEU: fputs ("ltu", file); break;
5453 case LTU: fputs ("geu", file); break;
5454 case LEU: fputs ("gtu", file); break;
5455 default:
5456 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5457 }
5458
5459 else if (letter == 'F')
5460 switch (code)
5461 {
5462 case EQ: fputs ("c1f", file); break;
5463 case NE: fputs ("c1t", file); break;
5464 default:
5465 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5466 }
5467
5468 else if (letter == 'W')
5469 switch (code)
5470 {
5471 case EQ: fputs ("c1t", file); break;
5472 case NE: fputs ("c1f", file); break;
5473 default:
5474 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5475 }
5476
5477 else if (letter == 'h')
5478 {
5479 if (GET_CODE (op) == HIGH)
5480 op = XEXP (op, 0);
5481
5482 print_operand_reloc (file, op, mips_hi_relocs);
5483 }
5484
5485 else if (letter == 'R')
5486 print_operand_reloc (file, op, mips_lo_relocs);
5487
5488 else if (letter == 'Y')
5489 {
5490 if (GET_CODE (op) == CONST_INT
5491 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5492 < ARRAY_SIZE (mips_fp_conditions)))
5493 fputs (mips_fp_conditions[INTVAL (op)], file);
5494 else
5495 output_operand_lossage ("invalid %%Y value");
5496 }
5497
5498 else if (letter == 'Z')
5499 {
5500 if (ISA_HAS_8CC)
5501 {
5502 print_operand (file, op, 0);
5503 fputc (',', file);
5504 }
5505 }
5506
5507 else if (letter == 'q')
5508 {
5509 int regnum;
5510
5511 if (code != REG)
5512 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5513
5514 regnum = REGNO (op);
5515 if (MD_REG_P (regnum))
5516 fprintf (file, "$ac0");
5517 else if (DSP_ACC_REG_P (regnum))
5518 fprintf (file, "$ac%c", reg_names[regnum][3]);
5519 else
5520 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5521 }
5522
5523 else if (code == REG || code == SUBREG)
5524 {
5525 register int regnum;
5526
5527 if (code == REG)
5528 regnum = REGNO (op);
5529 else
5530 regnum = true_regnum (op);
5531
5532 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5533 || (letter == 'L' && WORDS_BIG_ENDIAN)
5534 || letter == 'D')
5535 regnum++;
5536
5537 fprintf (file, "%s", reg_names[regnum]);
5538 }
5539
5540 else if (code == MEM)
5541 {
5542 if (letter == 'D')
5543 output_address (plus_constant (XEXP (op, 0), 4));
5544 else
5545 output_address (XEXP (op, 0));
5546 }
5547
5548 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5549 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5550
5551 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5552 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5553
5554 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5555 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5556
5557 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5558 fputs (reg_names[GP_REG_FIRST], file);
5559
5560 else if (letter == 'd' || letter == 'x' || letter == 'X')
5561 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5562
5563 else if (letter == 'T' || letter == 't')
5564 {
5565 int truth = (code == NE) == (letter == 'T');
5566 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5567 }
5568
5569 else if (CONST_GP_P (op))
5570 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5571
5572 else
5573 output_addr_const (file, op);
5574 }
5575
5576
5577 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5578 RELOCS is the array of relocations to use. */
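/* For example, assuming a reloc string such as "%hi(", the operand
   "foo + 4" is printed as "%hi(foo+4)"; the loop at the end of this
   function emits one closing ')' for each '(' in the reloc string.
   (The actual strings in mips_hi_relocs/mips_lo_relocs depend on the
   configuration.) */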
5579
5580 static void
5581 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5582 {
5583 enum mips_symbol_type symbol_type;
5584 const char *p;
5585 rtx base;
5586 HOST_WIDE_INT offset;
5587
5588 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5589 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5590
5591 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5592 mips_split_const (op, &base, &offset);
5593 if (UNSPEC_ADDRESS_P (base))
5594 op = plus_constant (UNSPEC_ADDRESS (base), offset);
5595
5596 fputs (relocs[symbol_type], file);
5597 output_addr_const (file, op);
5598 for (p = relocs[symbol_type]; *p != 0; p++)
5599 if (*p == '(')
5600 fputc (')', file);
5601 }
5602 \f
5603 /* Output address operand X to FILE. */
5604
5605 void
5606 print_operand_address (FILE *file, rtx x)
5607 {
5608 struct mips_address_info addr;
5609
5610 if (mips_classify_address (&addr, x, word_mode, true))
5611 switch (addr.type)
5612 {
5613 case ADDRESS_REG:
5614 print_operand (file, addr.offset, 0);
5615 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5616 return;
5617
5618 case ADDRESS_LO_SUM:
5619 print_operand (file, addr.offset, 'R');
5620 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5621 return;
5622
5623 case ADDRESS_CONST_INT:
5624 output_addr_const (file, x);
5625 fprintf (file, "(%s)", reg_names[0]);
5626 return;
5627
5628 case ADDRESS_SYMBOLIC:
5629 output_addr_const (file, x);
5630 return;
5631 }
5632 gcc_unreachable ();
5633 }
5634 \f
5635 /* When using assembler macros, keep track of all of the small-data externs
5636 so that mips_file_end can emit the appropriate declarations for them.
5637
5638 In most cases it would be safe (though pointless) to emit .externs
5639 for other symbols too. One exception is when an object is within
5640 the -G limit but declared by the user to be in a section other
5641 than .sbss or .sdata. */
5642
5643 int
5644 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5645 {
5646 register struct extern_list *p;
5647
5648 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5649 {
5650 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5651 p->next = extern_head;
5652 p->name = name;
5653 p->size = int_size_in_bytes (TREE_TYPE (decl));
5654 extern_head = p;
5655 }
5656
5657 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5658 {
5659 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5660 p->next = extern_head;
5661 p->name = name;
5662 p->size = -1;
5663 extern_head = p;
5664 }
5665
5666 return 0;
5667 }
5668
5669 #if TARGET_IRIX
5670 static void
5671 irix_output_external_libcall (rtx fun)
5672 {
5673 register struct extern_list *p;
5674
5675 if (mips_abi == ABI_32)
5676 {
5677 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5678 p->next = extern_head;
5679 p->name = XSTR (fun, 0);
5680 p->size = -1;
5681 extern_head = p;
5682 }
5683 }
5684 #endif
5685 \f
5686 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5687 put out a MIPS ECOFF file and a stab. */
5688
5689 void
5690 mips_output_filename (FILE *stream, const char *name)
5691 {
5692
5693 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5694 directives. */
5695 if (write_symbols == DWARF2_DEBUG)
5696 return;
5697 else if (mips_output_filename_first_time)
5698 {
5699 mips_output_filename_first_time = 0;
5700 num_source_filenames += 1;
5701 current_function_file = name;
5702 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5703 output_quoted_string (stream, name);
5704 putc ('\n', stream);
5705 }
5706
5707 /* If we are emitting stabs, let dbxout.c handle this (except for
5708 the mips_output_filename_first_time case). */
5709 else if (write_symbols == DBX_DEBUG)
5710 return;
5711
5712 else if (name != current_function_file
5713 && strcmp (name, current_function_file) != 0)
5714 {
5715 num_source_filenames += 1;
5716 current_function_file = name;
5717 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5718 output_quoted_string (stream, name);
5719 putc ('\n', stream);
5720 }
5721 }
5722 \f
5723 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5724 that should be written before the opening quote, such as "\t.ascii\t"
5725 for real string data or "\t# " for a comment. */
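/* For example, given the three-byte string a"b and the prefix
   "\t.ascii\t", this function emits:
	.ascii	"a\"b"
   Long strings are split across several such lines once the output
   column passes 72. */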
5726
5727 void
5728 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5729 const char *prefix)
5730 {
5731 size_t i;
5732 int cur_pos = 17;
5733 register const unsigned char *string =
5734 (const unsigned char *)string_param;
5735
5736 fprintf (stream, "%s\"", prefix);
5737 for (i = 0; i < len; i++)
5738 {
5739 register int c = string[i];
5740
5741 if (ISPRINT (c))
5742 {
5743 if (c == '\\' || c == '\"')
5744 {
5745 putc ('\\', stream);
5746 cur_pos++;
5747 }
5748 putc (c, stream);
5749 cur_pos++;
5750 }
5751 else
5752 {
5753 fprintf (stream, "\\%03o", c);
5754 cur_pos += 4;
5755 }
5756
5757 if (cur_pos > 72 && i+1 < len)
5758 {
5759 cur_pos = 17;
5760 fprintf (stream, "\"\n%s\"", prefix);
5761 }
5762 }
5763 fprintf (stream, "\"\n");
5764 }
5765 \f
5766 /* Implement TARGET_ASM_FILE_START. */
5767
5768 static void
5769 mips_file_start (void)
5770 {
5771 default_file_start ();
5772
5773 if (!TARGET_IRIX)
5774 {
5775 /* Generate a special section to describe the ABI switches used to
5776 produce the resultant binary. This used to be done by the assembler
5777 setting bits in the ELF header's flags field, but we have run out of
5778 bits. GDB needs this information in order to be able to correctly
5779 debug these binaries. See the function mips_gdbarch_init() in
5780 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5781 causes unnecessary IRIX 6 ld warnings. */
5782 const char * abi_string = NULL;
5783
5784 switch (mips_abi)
5785 {
5786 case ABI_32: abi_string = "abi32"; break;
5787 case ABI_N32: abi_string = "abiN32"; break;
5788 case ABI_64: abi_string = "abi64"; break;
5789 case ABI_O64: abi_string = "abiO64"; break;
5790 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5791 default:
5792 gcc_unreachable ();
5793 }
5794 /* Note - we use fprintf directly rather than calling switch_to_section
5795 because in this way we can avoid creating an allocated section. We
5796 do not want this section to take up any space in the running
5797 executable. */
5798 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5799
5800 /* There is no ELF header flag to distinguish long32 forms of the
5801 EABI from long64 forms. Emit a special section to help tools
5802 such as GDB. */
5803 if (mips_abi == ABI_EABI)
5804 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5805 TARGET_LONG64 ? 64 : 32);
5806
5807 /* Restore the default section. */
5808 fprintf (asm_out_file, "\t.previous\n");
5809 }
5810
5811 /* Generate the pseudo ops that System V.4 wants. */
5812 if (TARGET_ABICALLS)
5813 fprintf (asm_out_file, "\t.abicalls\n");
5814
5815 if (TARGET_MIPS16)
5816 fprintf (asm_out_file, "\t.set\tmips16\n");
5817
5818 if (flag_verbose_asm)
5819 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5820 ASM_COMMENT_START,
5821 mips_section_threshold, mips_arch_info->name, mips_isa);
5822 }
5823
5824 #ifdef BSS_SECTION_ASM_OP
5825 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5826 in the use of sbss. */
5827
5828 void
5829 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5830 unsigned HOST_WIDE_INT size, int align)
5831 {
5832 extern tree last_assemble_variable_decl;
5833
5834 if (mips_in_small_data_p (decl))
5835 switch_to_section (get_named_section (NULL, ".sbss", 0));
5836 else
5837 switch_to_section (bss_section);
5838 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5839 last_assemble_variable_decl = decl;
5840 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5841 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5842 }
5843 #endif
5844 \f
5845 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5846 .externs for any small-data variables that turned out to be external. */
5847
5848 static void
5849 mips_file_end (void)
5850 {
5851 tree name_tree;
5852 struct extern_list *p;
5853
5854 if (extern_head)
5855 {
5856 fputs ("\n", asm_out_file);
5857
5858 for (p = extern_head; p != 0; p = p->next)
5859 {
5860 name_tree = get_identifier (p->name);
5861
5862 /* Positively ensure only one .extern for any given symbol. */
5863 if (!TREE_ASM_WRITTEN (name_tree)
5864 && TREE_SYMBOL_REFERENCED (name_tree))
5865 {
5866 TREE_ASM_WRITTEN (name_tree) = 1;
5867 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5868 `.global name .text' directive for every used but
5869 undefined function. If we don't, the linker may perform
5870 an optimization (skipping over the insns that set $gp)
5871 when it is unsafe. */
5872 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5873 {
5874 fputs ("\t.globl ", asm_out_file);
5875 assemble_name (asm_out_file, p->name);
5876 fputs (" .text\n", asm_out_file);
5877 }
5878 else
5879 {
5880 fputs ("\t.extern\t", asm_out_file);
5881 assemble_name (asm_out_file, p->name);
5882 fprintf (asm_out_file, ", %d\n", p->size);
5883 }
5884 }
5885 }
5886 }
5887 }
5888
5889 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5890 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5891
5892 void
5893 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5894 unsigned HOST_WIDE_INT size,
5895 unsigned int align)
5896 {
5897 /* If the target wants uninitialized const declarations in
5898 .rdata then don't put them in .comm. */
5899 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5900 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5901 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5902 {
5903 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5904 targetm.asm_out.globalize_label (stream, name);
5905
5906 switch_to_section (readonly_data_section);
5907 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5908 mips_declare_object (stream, name, "",
5909 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5910 size);
5911 }
5912 else
5913 mips_declare_common_object (stream, name, "\n\t.comm\t",
5914 size, align, true);
5915 }
5916
5917 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5918 NAME is the name of the object and ALIGN is the required alignment
5919 in bits. TAKES_ALIGNMENT_P is true if the directive takes a third
5920 alignment argument. */
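/* For example, mips_output_aligned_decl_common above passes
   "\n\t.comm\t" and TAKES_ALIGNMENT_P == true, so a 16-byte object
   with 64-bit alignment is emitted as ".comm name,16,8". */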
5921
5922 void
5923 mips_declare_common_object (FILE *stream, const char *name,
5924 const char *init_string,
5925 unsigned HOST_WIDE_INT size,
5926 unsigned int align, bool takes_alignment_p)
5927 {
5928 if (!takes_alignment_p)
5929 {
5930 size += (align / BITS_PER_UNIT) - 1;
5931 size -= size % (align / BITS_PER_UNIT);
5932 mips_declare_object (stream, name, init_string,
5933 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5934 }
5935 else
5936 mips_declare_object (stream, name, init_string,
5937 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5938 size, align / BITS_PER_UNIT);
5939 }
5940
5941 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5942 macros, mark the symbol as written so that mips_file_end won't emit an
5943 .extern for it. STREAM is the output file, NAME is the name of the
5944 symbol, INIT_STRING is the string that should be written before the
5945 symbol and FINAL_STRING is the string that should be written after it.
5946 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5947
5948 void
5949 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5950 const char *final_string, ...)
5951 {
5952 va_list ap;
5953
5954 fputs (init_string, stream);
5955 assemble_name (stream, name);
5956 va_start (ap, final_string);
5957 vfprintf (stream, final_string, ap);
5958 va_end (ap);
5959
5960 if (!TARGET_EXPLICIT_RELOCS)
5961 {
5962 tree name_tree = get_identifier (name);
5963 TREE_ASM_WRITTEN (name_tree) = 1;
5964 }
5965 }
5966
5967 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5968 extern int size_directive_output;
5969
5970 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5971 definitions except that it uses mips_declare_object() to emit the label. */
5972
5973 void
5974 mips_declare_object_name (FILE *stream, const char *name,
5975 tree decl ATTRIBUTE_UNUSED)
5976 {
5977 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5978 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
5979 #endif
5980
5981 size_directive_output = 0;
5982 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5983 {
5984 HOST_WIDE_INT size;
5985
5986 size_directive_output = 1;
5987 size = int_size_in_bytes (TREE_TYPE (decl));
5988 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5989 }
5990
5991 mips_declare_object (stream, name, "", ":\n");
5992 }
5993
5994 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5995
5996 void
5997 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5998 {
5999 const char *name;
6000
6001 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6002 if (!flag_inhibit_size_directive
6003 && DECL_SIZE (decl) != 0
6004 && !at_end && top_level
6005 && DECL_INITIAL (decl) == error_mark_node
6006 && !size_directive_output)
6007 {
6008 HOST_WIDE_INT size;
6009
6010 size_directive_output = 1;
6011 size = int_size_in_bytes (TREE_TYPE (decl));
6012 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6013 }
6014 }
6015 #endif
6016 \f
6017 /* Return true if X is a small data address that can be rewritten
6018 as a LO_SUM. */
6019
6020 static bool
6021 mips_rewrite_small_data_p (rtx x)
6022 {
6023 enum mips_symbol_type symbol_type;
6024
6025 return (TARGET_EXPLICIT_RELOCS
6026 && mips_symbolic_constant_p (x, &symbol_type)
6027 && symbol_type == SYMBOL_SMALL_DATA);
6028 }
6029
6030
6031 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6032
6033 static int
6034 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6035 {
6036 if (GET_CODE (*loc) == LO_SUM)
6037 return -1;
6038
6039 return mips_rewrite_small_data_p (*loc);
6040 }
6041
6042 /* Return true if OP refers to small data symbols directly, not through
6043 a LO_SUM. */
6044
6045 bool
6046 mips_small_data_pattern_p (rtx op)
6047 {
6048 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6049 }
6050 \f
6051 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6052
6053 static int
6054 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6055 {
6056 if (mips_rewrite_small_data_p (*loc))
6057 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6058
6059 if (GET_CODE (*loc) == LO_SUM)
6060 return -1;
6061
6062 return 0;
6063 }
6064
6065 /* If possible, rewrite OP so that it refers to small data using
6066 explicit relocations. */
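/* For example, a direct reference to a small-data symbol "x" becomes
   (lo_sum $gp (symbol_ref "x")), which print_operand_address later
   emits using the small-data relocation from mips_lo_relocs
   (typically "%gp_rel(x)($28)"). */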
6067
6068 rtx
6069 mips_rewrite_small_data (rtx op)
6070 {
6071 op = copy_insn (op);
6072 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6073 return op;
6074 }
6075 \f
6076 /* Return true if the current function has an insn that implicitly
6077 refers to $gp. */
6078
6079 static bool
6080 mips_function_has_gp_insn (void)
6081 {
6082 /* Don't bother rechecking if we found one last time. */
6083 if (!cfun->machine->has_gp_insn_p)
6084 {
6085 rtx insn;
6086
6087 push_topmost_sequence ();
6088 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6089 if (INSN_P (insn)
6090 && GET_CODE (PATTERN (insn)) != USE
6091 && GET_CODE (PATTERN (insn)) != CLOBBER
6092 && (get_attr_got (insn) != GOT_UNSET
6093 || small_data_pattern (PATTERN (insn), VOIDmode)))
6094 break;
6095 pop_topmost_sequence ();
6096
6097 cfun->machine->has_gp_insn_p = (insn != 0);
6098 }
6099 return cfun->machine->has_gp_insn_p;
6100 }
6101
6102
6103 /* Return the register that should be used as the global pointer
6104 within this function. Return 0 if the function doesn't need
6105 a global pointer. */
6106
6107 static unsigned int
6108 mips_global_pointer (void)
6109 {
6110 unsigned int regno;
6111
6112 /* $gp is always available in non-abicalls code. */
6113 if (!TARGET_ABICALLS)
6114 return GLOBAL_POINTER_REGNUM;
6115
6116 /* We must always provide $gp when it is used implicitly. */
6117 if (!TARGET_EXPLICIT_RELOCS)
6118 return GLOBAL_POINTER_REGNUM;
6119
6120 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6121 a valid gp. */
6122 if (current_function_profile)
6123 return GLOBAL_POINTER_REGNUM;
6124
6125 /* If the function has a nonlocal goto, $gp must hold the correct
6126 global pointer for the target function. */
6127 if (current_function_has_nonlocal_goto)
6128 return GLOBAL_POINTER_REGNUM;
6129
6130 /* If the gp is never referenced, there's no need to initialize it.
6131 Note that reload can sometimes introduce constant pool references
6132 into a function that otherwise didn't need them. For example,
6133 suppose we have an instruction like:
6134
6135 (set (reg:DF R1) (float:DF (reg:SI R2)))
6136
6137 If R2 turns out to be a constant such as 1, the instruction may have a
6138 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6139 using this constant if R2 doesn't get allocated to a register.
6140
6141 In cases like these, reload will have added the constant to the pool
6142 but no instruction will yet refer to it. */
6143 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6144 && !current_function_uses_const_pool
6145 && !mips_function_has_gp_insn ())
6146 return 0;
6147
6148 /* We need a global pointer, but perhaps we can use a call-clobbered
6149 register instead of $gp. */
6150 if (TARGET_NEWABI && current_function_is_leaf)
6151 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6152 if (!regs_ever_live[regno]
6153 && call_used_regs[regno]
6154 && !fixed_regs[regno]
6155 && regno != PIC_FUNCTION_ADDR_REGNUM)
6156 return regno;
6157
6158 return GLOBAL_POINTER_REGNUM;
6159 }
6160
6161
6162 /* Return true if the current function must save REGNO. */
6163
6164 static bool
6165 mips_save_reg_p (unsigned int regno)
6166 {
6167 /* We only need to save $gp for NewABI PIC. */
6168 if (regno == GLOBAL_POINTER_REGNUM)
6169 return (TARGET_ABICALLS && TARGET_NEWABI
6170 && cfun->machine->global_pointer == regno);
6171
6172 /* Check call-saved registers. */
6173 if (regs_ever_live[regno] && !call_used_regs[regno])
6174 return true;
6175
6176 /* We need to save the old frame pointer before setting up a new one. */
6177 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6178 return true;
6179
6180 /* We need to save the incoming return address if it is ever clobbered
6181 within the function. */
6182 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
6183 return true;
6184
6185 if (TARGET_MIPS16)
6186 {
6187 tree return_type;
6188
6189 return_type = DECL_RESULT (current_function_decl);
6190
6191 /* $18 is a special case in mips16 code. It may be used to call
6192 a function which returns a floating point value, but it is
6193 marked as call-used in call_used_regs, so the check above misses it. */
6194 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6195 return true;
6196
6197 /* $31 is also a special case. It will be used to copy a return
6198 value into the floating point registers if the return value is
6199 floating point. */
6200 if (regno == GP_REG_FIRST + 31
6201 && mips16_hard_float
6202 && !aggregate_value_p (return_type, current_function_decl)
6203 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6204 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6205 return true;
6206 }
6207
6208 return false;
6209 }
6210
6211
6212 /* Return the bytes needed to compute the frame pointer from the current
6213 stack pointer. SIZE is the size (in bytes) of the local variables.
6214
6215 MIPS stack frames look like:
6216
6217 Before call After call
6218 +-----------------------+ +-----------------------+
6219 high | | | |
6220 mem. | | | |
6221 | caller's temps. | | caller's temps. |
6222 | | | |
6223 +-----------------------+ +-----------------------+
6224 | | | |
6225 | arguments on stack. | | arguments on stack. |
6226 | | | |
6227 +-----------------------+ +-----------------------+
6228 | 4 words to save | | 4 words to save |
6229 | arguments passed | | arguments passed |
6230 | in registers, even | | in registers, even |
6231 SP->| if not passed. | VFP->| if not passed. |
6232 +-----------------------+ +-----------------------+
6233 | |
6234 | fp register save |
6235 | |
6236 +-----------------------+
6237 | |
6238 | gp register save |
6239 | |
6240 +-----------------------+
6241 | |
6242 | local variables |
6243 | |
6244 +-----------------------+
6245 | |
6246 | alloca allocations |
6247 | |
6248 +-----------------------+
6249 | |
6250 | GP save for V.4 abi |
6251 | |
6252 +-----------------------+
6253 | |
6254 | arguments on stack |
6255 | |
6256 +-----------------------+
6257 | 4 words to save |
6258 | arguments passed |
6259 | in registers, even |
6260 low SP->| if not passed. |
6261 memory +-----------------------+
6262
6263 */
6264
6265 HOST_WIDE_INT
6266 compute_frame_size (HOST_WIDE_INT size)
6267 {
6268 unsigned int regno;
6269 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6270 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6271 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6272 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6273 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6274 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6275 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6276 unsigned int mask; /* mask of saved gp registers */
6277 unsigned int fmask; /* mask of saved fp registers */
6278
6279 cfun->machine->global_pointer = mips_global_pointer ();
6280
6281 gp_reg_size = 0;
6282 fp_reg_size = 0;
6283 mask = 0;
6284 fmask = 0;
6285 var_size = MIPS_STACK_ALIGN (size);
6286 args_size = current_function_outgoing_args_size;
6287 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6288
6289 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6290 functions. If the function has local variables, we're committed
6291 to allocating it anyway. Otherwise reclaim it here. */
6292 if (var_size == 0 && current_function_is_leaf)
6293 cprestore_size = args_size = 0;
6294
6295 /* The MIPS 3.0 linker does not like functions that dynamically
6296 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6297 looks like we are trying to create a second frame pointer to the
6298 function. Allocate some stack space to make it happy. */
6299
6300 if (args_size == 0 && current_function_calls_alloca)
6301 args_size = 4 * UNITS_PER_WORD;
6302
6303 total_size = var_size + args_size + cprestore_size;
6304
6305 /* Calculate space needed for gp registers. */
6306 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6307 if (mips_save_reg_p (regno))
6308 {
6309 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6310 mask |= 1 << (regno - GP_REG_FIRST);
6311 }
6312
6313 /* We need to restore these for the handler. */
6314 if (current_function_calls_eh_return)
6315 {
6316 unsigned int i;
6317 for (i = 0; ; ++i)
6318 {
6319 regno = EH_RETURN_DATA_REGNO (i);
6320 if (regno == INVALID_REGNUM)
6321 break;
6322 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6323 mask |= 1 << (regno - GP_REG_FIRST);
6324 }
6325 }
6326
6327 /* This loop must iterate over the same space as its companion in
6328 save_restore_insns. */
6329 for (regno = (FP_REG_LAST - FP_INC + 1);
6330 regno >= FP_REG_FIRST;
6331 regno -= FP_INC)
6332 {
6333 if (mips_save_reg_p (regno))
6334 {
6335 fp_reg_size += FP_INC * UNITS_PER_FPREG;
6336 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6337 }
6338 }
6339
6340 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6341 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6342
6343 /* Add in the space required for saving incoming register arguments. */
6344 total_size += current_function_pretend_args_size;
6345 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6346
6347 /* Save other computed information. */
6348 cfun->machine->frame.total_size = total_size;
6349 cfun->machine->frame.var_size = var_size;
6350 cfun->machine->frame.args_size = args_size;
6351 cfun->machine->frame.cprestore_size = cprestore_size;
6352 cfun->machine->frame.gp_reg_size = gp_reg_size;
6353 cfun->machine->frame.fp_reg_size = fp_reg_size;
6354 cfun->machine->frame.mask = mask;
6355 cfun->machine->frame.fmask = fmask;
6356 cfun->machine->frame.initialized = reload_completed;
6357 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6358 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
6359
6360 if (mask)
6361 {
6362 HOST_WIDE_INT offset;
6363
6364 offset = (args_size + cprestore_size + var_size
6365 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6366 cfun->machine->frame.gp_sp_offset = offset;
6367 cfun->machine->frame.gp_save_offset = offset - total_size;
6368 }
6369 else
6370 {
6371 cfun->machine->frame.gp_sp_offset = 0;
6372 cfun->machine->frame.gp_save_offset = 0;
6373 }
6374
6375 if (fmask)
6376 {
6377 HOST_WIDE_INT offset;
6378
6379 offset = (args_size + cprestore_size + var_size
6380 + gp_reg_rounded + fp_reg_size
6381 - FP_INC * UNITS_PER_FPREG);
6382 cfun->machine->frame.fp_sp_offset = offset;
6383 cfun->machine->frame.fp_save_offset = offset - total_size;
6384 }
6385 else
6386 {
6387 cfun->machine->frame.fp_sp_offset = 0;
6388 cfun->machine->frame.fp_save_offset = 0;
6389 }
6390
6391 /* Ok, we're done. */
6392 return total_size;
6393 }
6394 \f
6395 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6396 pointer or argument pointer. TO is either the stack pointer or
6397 hard frame pointer. */
6398
6399 HOST_WIDE_INT
6400 mips_initial_elimination_offset (int from, int to)
6401 {
6402 HOST_WIDE_INT offset;
6403
6404 compute_frame_size (get_frame_size ());
6405
6406 /* Set OFFSET to the offset from the stack pointer. */
6407 switch (from)
6408 {
6409 case FRAME_POINTER_REGNUM:
6410 offset = 0;
6411 break;
6412
6413 case ARG_POINTER_REGNUM:
6414 offset = (cfun->machine->frame.total_size
6415 - current_function_pretend_args_size);
6416 break;
6417
6418 default:
6419 gcc_unreachable ();
6420 }
6421
6422 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6423 offset -= cfun->machine->frame.args_size;
6424
6425 return offset;
6426 }
6427 \f
6428 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6429 back to a previous frame. */
6430 rtx
6431 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6432 {
6433 if (count != 0)
6434 return const0_rtx;
6435
6436 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6437 }
6438 \f
6439 /* Use FN to save or restore register REGNO. MODE is the register's
6440 mode and OFFSET is the offset of its save slot from the current
6441 stack pointer. */
6442
6443 static void
6444 mips_save_restore_reg (enum machine_mode mode, int regno,
6445 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6446 {
6447 rtx mem;
6448
6449 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
6450
6451 fn (gen_rtx_REG (mode, regno), mem);
6452 }
6453
6454
6455 /* Call FN for each register that is saved by the current function.
6456 SP_OFFSET is the offset of the current stack pointer from the start
6457 of the frame. */
6458
6459 static void
6460 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6461 {
6462 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6463
6464 enum machine_mode fpr_mode;
6465 HOST_WIDE_INT offset;
6466 int regno;
6467
6468 /* Save registers starting from high to low. Debuggers prefer the
6469 return register to be stored at func+4, and this also lets us avoid
6470 a nop in the epilogue if at least one register is restored in
6471 addition to the return address. */
6472 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6473 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6474 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6475 {
6476 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6477 offset -= GET_MODE_SIZE (gpr_mode);
6478 }
6479
6480 /* This loop must iterate over the same space as its companion in
6481 compute_frame_size. */
6482 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6483 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6484 for (regno = (FP_REG_LAST - FP_INC + 1);
6485 regno >= FP_REG_FIRST;
6486 regno -= FP_INC)
6487 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6488 {
6489 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6490 offset -= GET_MODE_SIZE (fpr_mode);
6491 }
6492 #undef BITSET_P
6493 }
6494 \f
6495 /* If we're generating n32 or n64 abicalls, and the current function
6496 does not use $28 as its global pointer, emit a cplocal directive.
6497 Use pic_offset_table_rtx as the argument to the directive. */
6498
6499 static void
6500 mips_output_cplocal (void)
6501 {
6502 if (!TARGET_EXPLICIT_RELOCS
6503 && cfun->machine->global_pointer > 0
6504 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6505 output_asm_insn (".cplocal %+", 0);
6506 }
6507
6508 /* Return the style of GP load sequence that is being used for the
6509 current function. */
6510
6511 enum mips_loadgp_style
6512 mips_current_loadgp_style (void)
6513 {
6514 if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
6515 return LOADGP_NONE;
6516
6517 if (TARGET_ABSOLUTE_ABICALLS)
6518 return LOADGP_ABSOLUTE;
6519
6520 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6521 }
6522
6523 /* The __gnu_local_gp symbol. */
6524
6525 static GTY(()) rtx mips_gnu_local_gp;
6526
6527 /* If we're generating n32 or n64 abicalls, emit instructions
6528 to set up the global pointer. */
6529
6530 static void
6531 mips_emit_loadgp (void)
6532 {
6533 rtx addr, offset, incoming_address;
6534
6535 switch (mips_current_loadgp_style ())
6536 {
6537 case LOADGP_ABSOLUTE:
6538 if (mips_gnu_local_gp == NULL)
6539 {
6540 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6541 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6542 }
6543 emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
6544 break;
6545
6546 case LOADGP_NEWABI:
6547 addr = XEXP (DECL_RTL (current_function_decl), 0);
6548 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6549 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6550 emit_insn (gen_loadgp (offset, incoming_address));
6551 if (!TARGET_EXPLICIT_RELOCS)
6552 emit_insn (gen_loadgp_blockage ());
6553 break;
6554
6555 default:
6556 break;
6557 }
6558 }
6559
6560 /* Set up the stack and frame (if desired) for the function. */
6561
6562 static void
6563 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6564 {
6565 const char *fnname;
6566 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6567
6568 #ifdef SDB_DEBUGGING_INFO
6569 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6570 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6571 #endif
6572
6573 /* In mips16 mode, we may need to generate a 32-bit stub to handle
6574 floating point arguments. The linker will arrange for any 32 bit
6575 functions to call this stub, which will then jump to the 16 bit
6576 function proper. */
6577 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6578 && current_function_args_info.fp_code != 0)
6579 build_mips16_function_stub (file);
6580
6581 if (!FUNCTION_NAME_ALREADY_DECLARED)
6582 {
6583 /* Get the function name the same way that toplev.c does before calling
6584 assemble_start_function. This is needed so that the name used here
6585 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6586 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6587
6588 if (!flag_inhibit_size_directive)
6589 {
6590 fputs ("\t.ent\t", file);
6591 assemble_name (file, fnname);
6592 fputs ("\n", file);
6593 }
6594
6595 assemble_name (file, fnname);
6596 fputs (":\n", file);
6597 }
6598
6599 /* Stop mips_file_end from treating this function as external. */
6600 if (TARGET_IRIX && mips_abi == ABI_32)
6601 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6602
6603 if (!flag_inhibit_size_directive)
6604 {
6605 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6606 fprintf (file,
6607 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6608 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6609 ", args= " HOST_WIDE_INT_PRINT_DEC
6610 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6611 (reg_names[(frame_pointer_needed)
6612 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6613 ((frame_pointer_needed && TARGET_MIPS16)
6614 ? tsize - cfun->machine->frame.args_size
6615 : tsize),
6616 reg_names[GP_REG_FIRST + 31],
6617 cfun->machine->frame.var_size,
6618 cfun->machine->frame.num_gp,
6619 cfun->machine->frame.num_fp,
6620 cfun->machine->frame.args_size,
6621 cfun->machine->frame.cprestore_size);
6622
6623 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6624 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6625 cfun->machine->frame.mask,
6626 cfun->machine->frame.gp_save_offset);
6627 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6628 cfun->machine->frame.fmask,
6629 cfun->machine->frame.fp_save_offset);
6630
6631 /* Require:
6632 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6633 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6634 }
6635
6636 if (mips_current_loadgp_style () == LOADGP_OLDABI)
6637 {
6638 /* Handle the initialization of $gp for SVR4 PIC. */
6639 if (!cfun->machine->all_noreorder_p)
6640 output_asm_insn ("%(.cpload\t%^%)", 0);
6641 else
6642 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6643 }
6644 else if (cfun->machine->all_noreorder_p)
6645 output_asm_insn ("%(%<", 0);
6646
6647 /* Tell the assembler which register we're using as the global
6648 pointer. This is needed for thunks, since they can use either
6649 explicit relocs or assembler macros. */
6650 mips_output_cplocal ();
6651 }
6652 \f
6653 /* Make the last instruction frame related and note that it performs
6654 the operation described by FRAME_PATTERN. */
6655
6656 static void
6657 mips_set_frame_expr (rtx frame_pattern)
6658 {
6659 rtx insn;
6660
6661 insn = get_last_insn ();
6662 RTX_FRAME_RELATED_P (insn) = 1;
6663 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6664 frame_pattern,
6665 REG_NOTES (insn));
6666 }
6667
6668
6669 /* Return a frame-related rtx that stores REG at MEM.
6670 REG must be a single register. */
6671
6672 static rtx
6673 mips_frame_set (rtx mem, rtx reg)
6674 {
6675 rtx set;
6676
6677 /* If we're saving the return address register and the dwarf return
6678 address column differs from the hard register number, adjust the
6679 note reg to refer to the former. */
6680 if (REGNO (reg) == GP_REG_FIRST + 31
6681 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6682 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6683
6684 set = gen_rtx_SET (VOIDmode, mem, reg);
6685 RTX_FRAME_RELATED_P (set) = 1;
6686
6687 return set;
6688 }
6689
6690
6691 /* Save register REG to MEM. Make the instruction frame-related. */
6692
6693 static void
6694 mips_save_reg (rtx reg, rtx mem)
6695 {
6696 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6697 {
6698 rtx x1, x2;
6699
6700 if (mips_split_64bit_move_p (mem, reg))
6701 mips_split_64bit_move (mem, reg);
6702 else
6703 emit_move_insn (mem, reg);
6704
6705 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6706 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6707 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6708 }
6709 else
6710 {
6711 if (TARGET_MIPS16
6712 && REGNO (reg) != GP_REG_FIRST + 31
6713 && !M16_REG_P (REGNO (reg)))
6714 {
6715 /* Save a non-mips16 register by moving it through a temporary.
6716 We don't need to do this for $31 since there's a special
6717 instruction for it. */
6718 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6719 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6720 }
6721 else
6722 emit_move_insn (mem, reg);
6723
6724 mips_set_frame_expr (mips_frame_set (mem, reg));
6725 }
6726 }
6727
6728
6729 /* Expand the prologue into a bunch of separate insns. */
6730
6731 void
6732 mips_expand_prologue (void)
6733 {
6734 HOST_WIDE_INT size;
6735
6736 if (cfun->machine->global_pointer > 0)
6737 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6738
6739 size = compute_frame_size (get_frame_size ());
6740
6741 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6742 bytes beforehand; this is enough to cover the register save area
6743 without going out of range. */
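/* For example, with the !mips16 limit of 0x7ff0 bytes, a 0x9000-byte
   frame is allocated as an initial 0x7ff0-byte adjustment (which
   covers the register save area) followed by a second 0x1010-byte
   adjustment below. */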
6744 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6745 {
6746 HOST_WIDE_INT step1;
6747
6748 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6749 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6750 stack_pointer_rtx,
6751 GEN_INT (-step1)))) = 1;
6752 size -= step1;
6753 mips_for_each_saved_reg (size, mips_save_reg);
6754 }
6755
6756 /* Allocate the rest of the frame. */
6757 if (size > 0)
6758 {
6759 if (SMALL_OPERAND (-size))
6760 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6761 stack_pointer_rtx,
6762 GEN_INT (-size)))) = 1;
6763 else
6764 {
6765 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6766 if (TARGET_MIPS16)
6767 {
6768 /* There are no instructions to add or subtract registers
6769 from the stack pointer, so use the frame pointer as a
6770 temporary. We should always be using a frame pointer
6771 in this case anyway. */
6772 gcc_assert (frame_pointer_needed);
6773 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6774 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6775 hard_frame_pointer_rtx,
6776 MIPS_PROLOGUE_TEMP (Pmode)));
6777 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6778 }
6779 else
6780 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6781 stack_pointer_rtx,
6782 MIPS_PROLOGUE_TEMP (Pmode)));
6783
6784 /* Describe the combined effect of the previous instructions. */
6785 mips_set_frame_expr
6786 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6787 plus_constant (stack_pointer_rtx, -size)));
6788 }
6789 }
6790
6791 /* Set up the frame pointer, if we're using one. In mips16 code,
6792 we point the frame pointer ahead of the outgoing argument area.
6793 This should allow more variables & incoming arguments to be
6794 accessed with unextended instructions. */
6795 if (frame_pointer_needed)
6796 {
6797 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6798 {
6799 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6800 if (SMALL_OPERAND (cfun->machine->frame.args_size))
6801 RTX_FRAME_RELATED_P
6802 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6803 stack_pointer_rtx,
6804 offset))) = 1;
6805 else
6806 {
6807 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6808 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6809 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6810 hard_frame_pointer_rtx,
6811 MIPS_PROLOGUE_TEMP (Pmode)));
6812 mips_set_frame_expr
6813 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6814 plus_constant (stack_pointer_rtx,
6815 cfun->machine->frame.args_size)));
6816 }
6817 }
6818 else
6819 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6820 stack_pointer_rtx)) = 1;
6821 }
6822
6823 mips_emit_loadgp ();
6824
6825 /* If generating o32/o64 abicalls, save $gp on the stack. */
6826 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6827 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6828
6829 /* If we are profiling, make sure no instructions are scheduled before
6830 the call to mcount. */
6831
6832 if (current_function_profile)
6833 emit_insn (gen_blockage ());
6834 }
6835 \f
6836 /* Do any necessary cleanup after a function to restore stack, frame,
6837 and regs. */
6838
6839 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6840
6841 static void
6842 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6843 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6844 {
6845 /* Reinstate the normal $gp. */
6846 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6847 mips_output_cplocal ();
6848
6849 if (cfun->machine->all_noreorder_p)
6850 {
6851 /* Avoid using %>%) since it adds excess whitespace. */
6852 output_asm_insn (".set\tmacro", 0);
6853 output_asm_insn (".set\treorder", 0);
6854 set_noreorder = set_nomacro = 0;
6855 }
6856
6857 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6858 {
6859 const char *fnname;
6860
6861 /* Get the function name the same way that toplev.c does before calling
6862 assemble_start_function. This is needed so that the name used here
6863 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6864 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6865 fputs ("\t.end\t", file);
6866 assemble_name (file, fnname);
6867 fputs ("\n", file);
6868 }
6869 }
6870 \f
6871 /* Emit instructions to restore register REG from slot MEM. */
6872
6873 static void
6874 mips_restore_reg (rtx reg, rtx mem)
6875 {
6876 /* There's no mips16 instruction to load $31 directly. Load into
6877 $7 instead and adjust the return insn appropriately. */
6878 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6879 reg = gen_rtx_REG (GET_MODE (reg), 7);
6880
6881 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6882 {
6883 /* Can't restore directly; move through a temporary. */
6884 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6885 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6886 }
6887 else
6888 emit_move_insn (reg, mem);
6889 }
6890
6891
6892 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6893 if this epilogue precedes a sibling call, false if it is for a normal
6894 "epilogue" pattern. */
6895
6896 void
6897 mips_expand_epilogue (int sibcall_p)
6898 {
6899 HOST_WIDE_INT step1, step2;
6900 rtx base, target;
6901
6902 if (!sibcall_p && mips_can_use_return_insn ())
6903 {
6904 emit_jump_insn (gen_return ());
6905 return;
6906 }
6907
6908 /* Split the frame into two. STEP1 is the amount of stack we should
6909 deallocate before restoring the registers. STEP2 is the amount we
6910 should deallocate afterwards.
6911
6912 Start off by assuming that no registers need to be restored. */
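/* For example, mirroring the prologue, a 0x9000-byte frame that
   restores registers with the !mips16 limit of 0x7ff0 ends up with
   STEP1 = 0x1010 and STEP2 = 0x7ff0. */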
6913 step1 = cfun->machine->frame.total_size;
6914 step2 = 0;
6915
6916 /* Work out which register holds the frame address. Account for the
6917 frame pointer offset used by mips16 code. */
6918 if (!frame_pointer_needed)
6919 base = stack_pointer_rtx;
6920 else
6921 {
6922 base = hard_frame_pointer_rtx;
6923 if (TARGET_MIPS16)
6924 step1 -= cfun->machine->frame.args_size;
6925 }
6926
6927 /* If we need to restore registers, deallocate as much stack as
6928 possible in the second step without going out of range. */
6929 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6930 {
6931 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6932 step1 -= step2;
6933 }
6934
6935 /* Set TARGET to BASE + STEP1. */
6936 target = base;
6937 if (step1 > 0)
6938 {
6939 rtx adjust;
6940
6941 /* Get an rtx for STEP1 that we can add to BASE. */
6942 adjust = GEN_INT (step1);
6943 if (!SMALL_OPERAND (step1))
6944 {
6945 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6946 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6947 }
6948
6949 /* Normal mode code can copy the result straight into $sp. */
6950 if (!TARGET_MIPS16)
6951 target = stack_pointer_rtx;
6952
6953 emit_insn (gen_add3_insn (target, base, adjust));
6954 }
6955
6956 /* Copy TARGET into the stack pointer. */
6957 if (target != stack_pointer_rtx)
6958 emit_move_insn (stack_pointer_rtx, target);
6959
6960 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6961 implicitly used by all SYMBOL_REFs. We must emit a blockage
6962 insn before restoring it. */
6963 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6964 emit_insn (gen_blockage ());
6965
6966 /* Restore the registers. */
6967 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6968 mips_restore_reg);
6969
6970 /* Deallocate the final bit of the frame. */
6971 if (step2 > 0)
6972 emit_insn (gen_add3_insn (stack_pointer_rtx,
6973 stack_pointer_rtx,
6974 GEN_INT (step2)));
6975
6976 /* Add in the __builtin_eh_return stack adjustment. We need to
6977 use a temporary in mips16 code. */
6978 if (current_function_calls_eh_return)
6979 {
6980 if (TARGET_MIPS16)
6981 {
6982 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6983 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6984 MIPS_EPILOGUE_TEMP (Pmode),
6985 EH_RETURN_STACKADJ_RTX));
6986 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6987 }
6988 else
6989 emit_insn (gen_add3_insn (stack_pointer_rtx,
6990 stack_pointer_rtx,
6991 EH_RETURN_STACKADJ_RTX));
6992 }
6993
6994 if (!sibcall_p)
6995 {
6996 /* The mips16 loads the return address into $7, not $31. */
6997 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6998 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6999 GP_REG_FIRST + 7)));
7000 else
7001 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7002 GP_REG_FIRST + 31)));
7003 }
7004 }
7005 \f
7006 /* Return nonzero if this function is known to have a null epilogue.
7007 This allows the optimizer to omit jumps to jumps if no stack
7008 was created. */
7009
7010 int
7011 mips_can_use_return_insn (void)
7012 {
7013 tree return_type;
7014
7015 if (! reload_completed)
7016 return 0;
7017
7018 if (regs_ever_live[31] || current_function_profile)
7019 return 0;
7020
7021 return_type = DECL_RESULT (current_function_decl);
7022
7023 /* In mips16 mode, a function which returns a floating point value
7024 needs to arrange to copy the return value into the floating point
7025 registers. */
7026 if (TARGET_MIPS16
7027 && mips16_hard_float
7028 && ! aggregate_value_p (return_type, current_function_decl)
7029 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7030 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
7031 return 0;
7032
7033 if (cfun->machine->frame.initialized)
7034 return cfun->machine->frame.total_size == 0;
7035
7036 return compute_frame_size (get_frame_size ()) == 0;
7037 }
7038 \f
7039 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
7040 in order to avoid duplicating too much logic from elsewhere. */
7041
7042 static void
7043 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7044 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7045 tree function)
7046 {
7047 rtx this, temp1, temp2, insn, fnaddr;
7048
7049 /* Pretend to be a post-reload pass while generating rtl. */
7050 no_new_pseudos = 1;
7051 reload_completed = 1;
7052 reset_block_changes ();
7053
7054 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
7055 for TARGET_NEWABI since the latter is a call-saved register. */
7056 if (TARGET_ABICALLS)
7057 cfun->machine->global_pointer
7058 = REGNO (pic_offset_table_rtx)
7059 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
7060
7061 /* Set up the global pointer for n32 or n64 abicalls. */
7062 mips_emit_loadgp ();
7063
7064 /* We need two temporary registers in some cases. */
7065 temp1 = gen_rtx_REG (Pmode, 2);
7066 temp2 = gen_rtx_REG (Pmode, 3);
7067
7068 /* Find out which register contains the "this" pointer. */
7069 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7070 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7071 else
7072 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7073
7074 /* Add DELTA to THIS. */
7075 if (delta != 0)
7076 {
7077 rtx offset = GEN_INT (delta);
7078 if (!SMALL_OPERAND (delta))
7079 {
7080 emit_move_insn (temp1, offset);
7081 offset = temp1;
7082 }
7083 emit_insn (gen_add3_insn (this, this, offset));
7084 }
7085
7086 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
7087 if (vcall_offset != 0)
7088 {
7089 rtx addr;
7090
7091 /* Set TEMP1 to *THIS. */
7092 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7093
7094 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
7095 addr = mips_add_offset (temp2, temp1, vcall_offset);
7096
7097 /* Load the offset and add it to THIS. */
7098 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7099 emit_insn (gen_add3_insn (this, this, temp1));
7100 }
7101
7102 /* Jump to the target function. Use a sibcall if direct jumps are
7103 allowed, otherwise load the address into a register first. */
7104 fnaddr = XEXP (DECL_RTL (function), 0);
7105 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
7106 {
7107 /* This is messy. gas treats "la $25,foo" as part of a call
7108 sequence and may allow a global "foo" to be lazily bound.
7109 The general move patterns therefore reject this combination.
7110
7111 In this context, lazy binding would actually be OK for o32 and o64,
7112 but it's still wrong for n32 and n64; see mips_load_call_address.
7113 We must therefore load the address via a temporary register if
7114 mips_dangerous_for_la25_p.
7115
7116 If we jump to the temporary register rather than $25, the assembler
7117 can use the move insn to fill the jump's delay slot. */
7118 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
7119 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7120 mips_load_call_address (temp1, fnaddr, true);
7121
7122 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7123 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7124 emit_jump_insn (gen_indirect_jump (temp1));
7125 }
7126 else
7127 {
7128 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7129 SIBLING_CALL_P (insn) = 1;
7130 }
7131
7132 /* Run just enough of rest_of_compilation. This sequence was
7133 "borrowed" from alpha.c. */
7134 insn = get_insns ();
7135 insn_locators_initialize ();
7136 split_all_insns_noflow ();
7137 if (TARGET_MIPS16)
7138 mips16_lay_out_constants ();
7139 shorten_branches (insn);
7140 final_start_function (insn, file, 1);
7141 final (insn, file, 1);
7142 final_end_function ();
7143
7144 /* Clean up the vars set above. Note that final_end_function resets
7145 the global pointer for us. */
7146 reload_completed = 0;
7147 no_new_pseudos = 0;
7148 }
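
/* A rough conceptual sketch of the thunk built above (this is not the
   rtl that is actually emitted):

	this = (char *) this + DELTA;
	if (VCALL_OFFSET != 0)
	  this = (char *) this
		 + *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);

   followed by a tail call (or indirect jump) to FUNCTION with the
   adjusted "this" pointer.  */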
7149 \f
7150 /* Returns nonzero if X contains a SYMBOL_REF. */
7151
7152 static int
7153 symbolic_expression_p (rtx x)
7154 {
7155 if (GET_CODE (x) == SYMBOL_REF)
7156 return 1;
7157
7158 if (GET_CODE (x) == CONST)
7159 return symbolic_expression_p (XEXP (x, 0));
7160
7161 if (UNARY_P (x))
7162 return symbolic_expression_p (XEXP (x, 0));
7163
7164 if (ARITHMETIC_P (x))
7165 return (symbolic_expression_p (XEXP (x, 0))
7166 || symbolic_expression_p (XEXP (x, 1)));
7167
7168 return 0;
7169 }
7170
7171 /* Choose the section to use for the constant rtx expression X that has
7172 mode MODE. */
7173
7174 static section *
7175 mips_select_rtx_section (enum machine_mode mode, rtx x,
7176 unsigned HOST_WIDE_INT align)
7177 {
7178 if (TARGET_MIPS16)
7179 {
7180 /* In mips16 mode, the constant table always goes in the same section
7181 as the function, so that constants can be loaded using PC relative
7182 addressing. */
7183 return function_section (current_function_decl);
7184 }
7185 else if (TARGET_EMBEDDED_DATA)
7186 {
7187 /* For embedded applications, always put constants in read-only data,
7188 in order to reduce RAM usage. */
7189 return mergeable_constant_section (mode, align, 0);
7190 }
7191 else
7192 {
7193 /* For hosted applications, always put constants in small data if
7194 possible, as this gives the best performance. */
7195 /* ??? Consider using mergeable small data sections. */
7196
7197 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7198 && mips_section_threshold > 0)
7199 return get_named_section (NULL, ".sdata", 0);
7200 else if (flag_pic && symbolic_expression_p (x))
7201 return get_named_section (NULL, ".data.rel.ro", 3);
7202 else
7203 return mergeable_constant_section (mode, align, 0);
7204 }
7205 }
7206
7207 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7208
7209 The complication here is that, with the combination TARGET_ABICALLS
7210 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7211 therefore not be included in the read-only part of a DSO. Handle such
7212 cases by selecting a normal data section instead of a read-only one.
7213 The logic apes that in default_function_rodata_section. */
7214
7215 static section *
7216 mips_function_rodata_section (tree decl)
7217 {
7218 if (!TARGET_ABICALLS || TARGET_GPWORD)
7219 return default_function_rodata_section (decl);
7220
7221 if (decl && DECL_SECTION_NAME (decl))
7222 {
7223 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7224 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7225 {
7226 char *rname = ASTRDUP (name);
7227 rname[14] = 'd';
7228 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7229 }
7230 else if (flag_function_sections && flag_data_sections
7231 && strncmp (name, ".text.", 6) == 0)
7232 {
7233 char *rname = ASTRDUP (name);
7234 memcpy (rname + 1, "data", 4);
7235 return get_section (rname, SECTION_WRITE, decl);
7236 }
7237 }
7238 return data_section;
7239 }
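
/* For example, with TARGET_ABICALLS && !TARGET_GPWORD, the code above
   places the jump table for a function in ".gnu.linkonce.t.foo" into
   ".gnu.linkonce.d.foo", and the jump table for a function in ".text.foo"
   (under -ffunction-sections -fdata-sections) into ".data.foo".
   "foo" is of course just a placeholder name.  */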
7240
7241 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
7242 access DECL using %gp_rel(...)($gp). */
7243
7244 static bool
7245 mips_in_small_data_p (tree decl)
7246 {
7247 HOST_WIDE_INT size;
7248
7249 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7250 return false;
7251
7252 /* We don't yet generate small-data references for -mabicalls. See related
7253 -G handling in override_options. */
7254 if (TARGET_ABICALLS)
7255 return false;
7256
7257 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7258 {
7259 const char *name;
7260
7261 /* Reject anything that isn't in a known small-data section. */
7262 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7263 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7264 return false;
7265
7266 /* If a symbol is defined externally, the assembler will use the
7267 usual -G rules when deciding how to implement macros. */
7268 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7269 return true;
7270 }
7271 else if (TARGET_EMBEDDED_DATA)
7272 {
7273 /* Don't put constants into the small data section: we want them
7274 to be in ROM rather than RAM. */
7275 if (TREE_CODE (decl) != VAR_DECL)
7276 return false;
7277
7278 if (TREE_READONLY (decl)
7279 && !TREE_SIDE_EFFECTS (decl)
7280 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7281 return false;
7282 }
7283
7284 size = int_size_in_bytes (TREE_TYPE (decl));
7285 return (size > 0 && size <= mips_section_threshold);
7286 }
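
/* For illustration: with a small-data limit of 8 bytes (-G 8) and
   -mno-abicalls, a definition such as "int counter;" (4 bytes) passes the
   size test above, so it is placed in .sdata/.sbss and can be reached with
   a single "lw $2,%gp_rel(counter)($gp)"-style access.  The name "counter"
   is just a placeholder.  */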
7287
7288 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7289 anchors for small data: the GP register acts as an anchor in that
7290 case. We also don't want to use them for PC-relative accesses,
7291 where the PC acts as an anchor. */
7292
7293 static bool
7294 mips_use_anchors_for_symbol_p (rtx symbol)
7295 {
7296 switch (mips_classify_symbol (symbol))
7297 {
7298 case SYMBOL_CONSTANT_POOL:
7299 case SYMBOL_SMALL_DATA:
7300 return false;
7301
7302 default:
7303 return true;
7304 }
7305 }
7306 \f
7307 /* See whether VALTYPE is a record whose fields should be returned in
7308 floating-point registers. If so, return the number of fields and
7309 list them in FIELDS (which should have two elements). Return 0
7310 otherwise.
7311
7312 For n32 & n64, a structure with one or two fields is returned in
7313 floating-point registers as long as every field has a floating-point
7314 type. */
7315
7316 static int
7317 mips_fpr_return_fields (tree valtype, tree *fields)
7318 {
7319 tree field;
7320 int i;
7321
7322 if (!TARGET_NEWABI)
7323 return 0;
7324
7325 if (TREE_CODE (valtype) != RECORD_TYPE)
7326 return 0;
7327
7328 i = 0;
7329 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7330 {
7331 if (TREE_CODE (field) != FIELD_DECL)
7332 continue;
7333
7334 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7335 return 0;
7336
7337 if (i == 2)
7338 return 0;
7339
7340 fields[i++] = field;
7341 }
7342 return i;
7343 }
7344
7345
7346 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
7347 a value in the most significant part of $2/$3 if:
7348
7349 - the target is big-endian;
7350
7351 - the value has a structure or union type (we generalize this to
7352 cover aggregates from other languages too); and
7353
7354 - the structure is not returned in floating-point registers. */
7355
7356 static bool
7357 mips_return_in_msb (tree valtype)
7358 {
7359 tree fields[2];
7360
7361 return (TARGET_NEWABI
7362 && TARGET_BIG_ENDIAN
7363 && AGGREGATE_TYPE_P (valtype)
7364 && mips_fpr_return_fields (valtype, fields) == 0);
7365 }
7366
7367
7368 /* Return a composite value in a pair of floating-point registers.
7369 MODE1 and OFFSET1 are the mode and byte offset for the first value,
7370 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
7371 complete value.
7372
7373 For n32 & n64, $f0 always holds the first value and $f2 the second.
7374 Otherwise the values are packed together as closely as possible. */
7375
7376 static rtx
7377 mips_return_fpr_pair (enum machine_mode mode,
7378 enum machine_mode mode1, HOST_WIDE_INT offset1,
7379 enum machine_mode mode2, HOST_WIDE_INT offset2)
7380 {
7381 int inc;
7382
7383 inc = (TARGET_NEWABI ? 2 : FP_INC);
7384 return gen_rtx_PARALLEL
7385 (mode,
7386 gen_rtvec (2,
7387 gen_rtx_EXPR_LIST (VOIDmode,
7388 gen_rtx_REG (mode1, FP_RETURN),
7389 GEN_INT (offset1)),
7390 gen_rtx_EXPR_LIST (VOIDmode,
7391 gen_rtx_REG (mode2, FP_RETURN + inc),
7392 GEN_INT (offset2))));
7393
7394 }
7395
7396
7397 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
7398 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
7399 VALTYPE is null and MODE is the mode of the return value. */
7400
7401 rtx
7402 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7403 enum machine_mode mode)
7404 {
7405 if (valtype)
7406 {
7407 tree fields[2];
7408 int unsignedp;
7409
7410 mode = TYPE_MODE (valtype);
7411 unsignedp = TYPE_UNSIGNED (valtype);
7412
7413 /* Since TARGET_PROMOTE_FUNCTION_RETURN is defined to return true,
7414 we must promote the mode just as PROMOTE_MODE does. */
7415 mode = promote_mode (valtype, mode, &unsignedp, 1);
7416
7417 /* Handle structures whose fields are returned in $f0/$f2. */
7418 switch (mips_fpr_return_fields (valtype, fields))
7419 {
7420 case 1:
7421 return gen_rtx_REG (mode, FP_RETURN);
7422
7423 case 2:
7424 return mips_return_fpr_pair (mode,
7425 TYPE_MODE (TREE_TYPE (fields[0])),
7426 int_byte_position (fields[0]),
7427 TYPE_MODE (TREE_TYPE (fields[1])),
7428 int_byte_position (fields[1]));
7429 }
7430
7431 /* If a value is passed in the most significant part of a register, see
7432 whether we have to round the mode up to a whole number of words. */
7433 if (mips_return_in_msb (valtype))
7434 {
7435 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7436 if (size % UNITS_PER_WORD != 0)
7437 {
7438 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7439 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7440 }
7441 }
7442
7443 /* For EABI, the class of return register depends entirely on MODE.
7444 For example, "struct { some_type x; }" and "union { some_type x; }"
7445 are returned in the same way as a bare "some_type" would be.
7446 Other ABIs only use FPRs for scalar, complex or vector types. */
7447 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7448 return gen_rtx_REG (mode, GP_RETURN);
7449 }
7450
7451 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7452 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7453 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7454 return gen_rtx_REG (mode, FP_RETURN);
7455
7456 /* Handle long doubles for n32 & n64. */
7457 if (mode == TFmode)
7458 return mips_return_fpr_pair (mode,
7459 DImode, 0,
7460 DImode, GET_MODE_SIZE (mode) / 2);
7461
7462 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7463 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7464 return mips_return_fpr_pair (mode,
7465 GET_MODE_INNER (mode), 0,
7466 GET_MODE_INNER (mode),
7467 GET_MODE_SIZE (mode) / 2);
7468
7469 return gen_rtx_REG (mode, GP_RETURN);
7470 }
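
/* Two worked examples of the n32/n64 rules above (illustrative only):

   - "struct { float f; double d; }" has two floating-point fields, so
     mips_fpr_return_fields succeeds and the structure is returned with
     F in $f0 and D in $f2.

   - A one-byte structure on a big-endian target has no floating-point
     fields, so mips_return_in_msb applies and the value comes back in
     the most significant byte of $2.  */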
7471
7472 /* Return true if an argument must be passed by reference. */
7473
7474 static bool
7475 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7476 enum machine_mode mode, tree type,
7477 bool named ATTRIBUTE_UNUSED)
7478 {
7479 if (mips_abi == ABI_EABI)
7480 {
7481 int size;
7482
7483 /* ??? How should SCmode be handled? */
7484 if (type == NULL_TREE || mode == DImode || mode == DFmode)
7485 return 0;
7486
7487 size = int_size_in_bytes (type);
7488 return size == -1 || size > UNITS_PER_WORD;
7489 }
7490 else
7491 {
7492 /* If we have a variable-sized parameter, we have no choice. */
7493 return targetm.calls.must_pass_in_stack (mode, type);
7494 }
7495 }
7496
7497 static bool
7498 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7499 enum machine_mode mode ATTRIBUTE_UNUSED,
7500 tree type ATTRIBUTE_UNUSED, bool named)
7501 {
7502 return mips_abi == ABI_EABI && named;
7503 }
7504
7505 /* Return true if registers of class CLASS cannot change from mode FROM
7506 to mode TO. */
7507
7508 bool
7509 mips_cannot_change_mode_class (enum machine_mode from,
7510 enum machine_mode to, enum reg_class class)
7511 {
7512 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7513 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7514 {
7515 if (TARGET_BIG_ENDIAN)
7516 {
7517 /* When a multi-word value is stored in paired floating-point
7518 registers, the first register always holds the low word.
7519 We therefore can't allow FPRs to change between single-word
7520 and multi-word modes. */
7521 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7522 return true;
7523 }
7524 else
7525 {
7526 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7527 in LO and HI, the high word always comes first. We therefore
7528 can't allow values stored in HI to change between single-word
7529 and multi-word modes.
7530 This rule applies to both the original HI/LO pair and the new
7531 DSP accumulators. */
7532 if (reg_classes_intersect_p (ACC_REGS, class))
7533 return true;
7534 }
7535 }
7536 /* Loading a 32-bit value into a 64-bit floating-point register
7537 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7538 We can't allow 64-bit float registers to change from SImode
7539 to a wider mode. */
7540 if (TARGET_FLOAT64
7541 && from == SImode
7542 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7543 && reg_classes_intersect_p (FP_REGS, class))
7544 return true;
7545 return false;
7546 }
7547
7548 /* Return true if X should not be moved directly into register $25.
7549 We need this because many versions of GAS will treat "la $25,foo" as
7550 part of a call sequence and so allow a global "foo" to be lazily bound. */
7551
7552 bool
7553 mips_dangerous_for_la25_p (rtx x)
7554 {
7555 HOST_WIDE_INT offset;
7556
7557 if (TARGET_EXPLICIT_RELOCS)
7558 return false;
7559
7560 mips_split_const (x, &x, &offset);
7561 return global_got_operand (x, VOIDmode);
7562 }
7563
7564 /* Implement PREFERRED_RELOAD_CLASS. */
7565
7566 enum reg_class
7567 mips_preferred_reload_class (rtx x, enum reg_class class)
7568 {
7569 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7570 return LEA_REGS;
7571
7572 if (TARGET_HARD_FLOAT
7573 && FLOAT_MODE_P (GET_MODE (x))
7574 && reg_class_subset_p (FP_REGS, class))
7575 return FP_REGS;
7576
7577 if (reg_class_subset_p (GR_REGS, class))
7578 class = GR_REGS;
7579
7580 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7581 class = M16_REGS;
7582
7583 return class;
7584 }
7585
7586 /* This function returns the register class required for a secondary
7587 register when copying between one of the registers in CLASS and X,
7588 using MODE. If IN_P is nonzero, the copy is going from X to the
7589 register, otherwise the register is the source. A return value of
7590 NO_REGS means that no secondary register is required. */
7591
7592 enum reg_class
7593 mips_secondary_reload_class (enum reg_class class,
7594 enum machine_mode mode, rtx x, int in_p)
7595 {
7596 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7597 int regno = -1;
7598 int gp_reg_p;
7599
7600 if (REG_P (x) || GET_CODE (x) == SUBREG)
7601 regno = true_regnum (x);
7602
7603 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7604
7605 if (mips_dangerous_for_la25_p (x))
7606 {
7607 gr_regs = LEA_REGS;
7608 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7609 return gr_regs;
7610 }
7611
7612 /* Copying from HI or LO to anywhere other than a general register
7613 requires a general register.
7614 This rule applies to both the original HI/LO pair and the new
7615 DSP accumulators. */
7616 if (reg_class_subset_p (class, ACC_REGS))
7617 {
7618 if (TARGET_MIPS16 && in_p)
7619 {
7620 /* We can't really copy to HI or LO at all in mips16 mode. */
7621 return M16_REGS;
7622 }
7623 return gp_reg_p ? NO_REGS : gr_regs;
7624 }
7625 if (ACC_REG_P (regno))
7626 {
7627 if (TARGET_MIPS16 && ! in_p)
7628 {
7629 /* We can't really copy to HI or LO at all in mips16 mode. */
7630 return M16_REGS;
7631 }
7632 return class == gr_regs ? NO_REGS : gr_regs;
7633 }
7634
7635 /* We can only copy a value to a condition code register from a
7636 floating point register, and even then we require a scratch
7637 floating point register. We can only copy a value out of a
7638 condition code register into a general register. */
7639 if (class == ST_REGS)
7640 {
7641 if (in_p)
7642 return FP_REGS;
7643 return gp_reg_p ? NO_REGS : gr_regs;
7644 }
7645 if (ST_REG_P (regno))
7646 {
7647 if (! in_p)
7648 return FP_REGS;
7649 return class == gr_regs ? NO_REGS : gr_regs;
7650 }
7651
7652 if (class == FP_REGS)
7653 {
7654 if (MEM_P (x))
7655 {
7656 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7657 return NO_REGS;
7658 }
7659 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7660 {
7661 /* We can use the l.s and l.d macros to load floating-point
7662 constants. ??? For l.s, we could probably get better
7663 code by returning GR_REGS here. */
7664 return NO_REGS;
7665 }
7666 else if (gp_reg_p || x == CONST0_RTX (mode))
7667 {
7668 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7669 return NO_REGS;
7670 }
7671 else if (FP_REG_P (regno))
7672 {
7673 /* In this case we can use mov.s or mov.d. */
7674 return NO_REGS;
7675 }
7676 else
7677 {
7678 /* Otherwise, we need to reload through an integer register. */
7679 return gr_regs;
7680 }
7681 }
7682
7683 /* In mips16 mode, going between memory and anything but M16_REGS
7684 requires an M16_REG. */
7685 if (TARGET_MIPS16)
7686 {
7687 if (class != M16_REGS && class != M16_NA_REGS)
7688 {
7689 if (gp_reg_p)
7690 return NO_REGS;
7691 return M16_REGS;
7692 }
7693 if (! gp_reg_p)
7694 {
7695 if (class == M16_REGS || class == M16_NA_REGS)
7696 return NO_REGS;
7697 return M16_REGS;
7698 }
7699 }
7700
7701 return NO_REGS;
7702 }
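
/* Example of the ACC_REGS handling above (illustrative, non-mips16):
   copying HI into a floating-point register is not a single-instruction
   operation, so GR_REGS is returned as the secondary reload class and
   the copy becomes something like "mfhi $2; mtc1 $2,$f0".  */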
7703
7704 /* Implement CLASS_MAX_NREGS.
7705
7706 Usually all registers are word-sized. The only supported exception
7707 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7708 registers. A word-based calculation is correct even in that case,
7709 since -msingle-float disallows multi-FPR values.
7710
7711 The FP status registers are an exception to this rule. They are always
7712 4 bytes wide as they only hold condition code modes, and CCmode is always
7713 considered to be 4 bytes wide. */
7714
7715 int
7716 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7717 enum machine_mode mode)
7718 {
7719 if (class == ST_REGS)
7720 return (GET_MODE_SIZE (mode) + 3) / 4;
7721 else
7722 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7723 }
7724
7725 static bool
7726 mips_valid_pointer_mode (enum machine_mode mode)
7727 {
7728 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7729 }
7730
7731 /* Target hook for vector_mode_supported_p. */
7732
7733 static bool
7734 mips_vector_mode_supported_p (enum machine_mode mode)
7735 {
7736 switch (mode)
7737 {
7738 case V2SFmode:
7739 return TARGET_PAIRED_SINGLE_FLOAT;
7740
7741 case V2HImode:
7742 case V4QImode:
7743 return TARGET_DSP;
7744
7745 default:
7746 return false;
7747 }
7748 }
7749 \f
7750 /* If we can access small data directly (using gp-relative relocation
7751 operators) return the small data pointer, otherwise return null.
7752
7753 For each mips16 function which refers to GP relative symbols, we
7754 use a pseudo register, initialized at the start of the function, to
7755 hold the $gp value. */
7756
7757 static rtx
7758 mips16_gp_pseudo_reg (void)
7759 {
7760 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7761 {
7762 rtx unspec;
7763 rtx insn, scan;
7764
7765 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7766
7767 /* We want to initialize this to a value which gcc will believe
7768 is constant. */
7769 start_sequence ();
7770 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7771 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7772 gen_rtx_CONST (Pmode, unspec));
7773 insn = get_insns ();
7774 end_sequence ();
7775
7776 push_topmost_sequence ();
7777 /* We need to emit the initialization after the FUNCTION_BEG
7778 note, so that it will be integrated. */
7779 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7780 if (NOTE_P (scan)
7781 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7782 break;
7783 if (scan == NULL_RTX)
7784 scan = get_insns ();
7785 insn = emit_insn_after (insn, scan);
7786 pop_topmost_sequence ();
7787 }
7788
7789 return cfun->machine->mips16_gp_pseudo_rtx;
7790 }
7791
7792 /* Write out code to move floating point arguments in or out of
7793 general registers. Output the instructions to FILE. FP_CODE is
7794 the code describing which arguments are present (see the comment at
7795 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7796 we are copying from the floating point registers. */
7797
7798 static void
7799 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7800 {
7801 const char *s;
7802 int gparg, fparg;
7803 unsigned int f;
7804
7805 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7806 gcc_assert (TARGET_OLDABI);
7807
7808 if (from_fp_p)
7809 s = "mfc1";
7810 else
7811 s = "mtc1";
7812 gparg = GP_ARG_FIRST;
7813 fparg = FP_ARG_FIRST;
7814 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7815 {
7816 if ((f & 3) == 1)
7817 {
7818 if ((fparg & 1) != 0)
7819 ++fparg;
7820 fprintf (file, "\t%s\t%s,%s\n", s,
7821 reg_names[gparg], reg_names[fparg]);
7822 }
7823 else if ((f & 3) == 2)
7824 {
7825 if (TARGET_64BIT)
7826 fprintf (file, "\td%s\t%s,%s\n", s,
7827 reg_names[gparg], reg_names[fparg]);
7828 else
7829 {
7830 if ((fparg & 1) != 0)
7831 ++fparg;
7832 if (TARGET_BIG_ENDIAN)
7833 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7834 reg_names[gparg], reg_names[fparg + 1], s,
7835 reg_names[gparg + 1], reg_names[fparg]);
7836 else
7837 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7838 reg_names[gparg], reg_names[fparg], s,
7839 reg_names[gparg + 1], reg_names[fparg + 1]);
7840 ++gparg;
7841 ++fparg;
7842 }
7843 }
7844 else
7845 gcc_unreachable ();
7846
7847 ++gparg;
7848 ++fparg;
7849 }
7850 }
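
/* For example (illustrative only), for a function whose only
   floating-point argument is a leading double -- fp_code 2 -- the loop
   above, copying *from* the FPRs on a big-endian 32-bit target, would
   emit roughly

	mfc1	$4,$f13
	mfc1	$5,$f12

   i.e. the two halves of $f12/$f13 are moved into $4/$5 with the high
   word first, as the big-endian GPR convention requires.  */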
7851
7852 /* Build a mips16 function stub. This is used for functions which
7853 take arguments in the floating point registers. It is 32 bit code
7854 that moves the floating point args into the general registers, and
7855 then jumps to the 16 bit code. */
7856
7857 static void
7858 build_mips16_function_stub (FILE *file)
7859 {
7860 const char *fnname;
7861 char *secname, *stubname;
7862 tree stubid, stubdecl;
7863 int need_comma;
7864 unsigned int f;
7865
7866 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7867 secname = (char *) alloca (strlen (fnname) + 20);
7868 sprintf (secname, ".mips16.fn.%s", fnname);
7869 stubname = (char *) alloca (strlen (fnname) + 20);
7870 sprintf (stubname, "__fn_stub_%s", fnname);
7871 stubid = get_identifier (stubname);
7872 stubdecl = build_decl (FUNCTION_DECL, stubid,
7873 build_function_type (void_type_node, NULL_TREE));
7874 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7875
7876 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7877 need_comma = 0;
7878 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7879 {
7880 fprintf (file, "%s%s",
7881 need_comma ? ", " : "",
7882 (f & 3) == 1 ? "float" : "double");
7883 need_comma = 1;
7884 }
7885 fprintf (file, ")\n");
7886
7887 fprintf (file, "\t.set\tnomips16\n");
7888 switch_to_section (function_section (stubdecl));
7889 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7890
7891 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7892 within a .ent, and we cannot emit another .ent. */
7893 if (!FUNCTION_NAME_ALREADY_DECLARED)
7894 {
7895 fputs ("\t.ent\t", file);
7896 assemble_name (file, stubname);
7897 fputs ("\n", file);
7898 }
7899
7900 assemble_name (file, stubname);
7901 fputs (":\n", file);
7902
7903 /* We don't want the assembler to insert any nops here. */
7904 fprintf (file, "\t.set\tnoreorder\n");
7905
7906 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7907
7908 fprintf (asm_out_file, "\t.set\tnoat\n");
7909 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7910 assemble_name (file, fnname);
7911 fprintf (file, "\n");
7912 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7913 fprintf (asm_out_file, "\t.set\tat\n");
7914
7915 /* Unfortunately, we can't fill the jump delay slot. We can't fill it
7916 with one of the mfc1 instructions, because the result is not
7917 available for one instruction, so if the very first instruction
7918 in the function refers to the register, it will see the wrong
7919 value. */
7920 fprintf (file, "\tnop\n");
7921
7922 fprintf (file, "\t.set\treorder\n");
7923
7924 if (!FUNCTION_NAME_ALREADY_DECLARED)
7925 {
7926 fputs ("\t.end\t", file);
7927 assemble_name (file, stubname);
7928 fputs ("\n", file);
7929 }
7930
7931 fprintf (file, "\t.set\tmips16\n");
7932
7933 switch_to_section (function_section (current_function_decl));
7934 }
7935
7936 /* We keep a list of functions for which we have already built stubs
7937 in build_mips16_call_stub. */
7938
7939 struct mips16_stub
7940 {
7941 struct mips16_stub *next;
7942 char *name;
7943 int fpret;
7944 };
7945
7946 static struct mips16_stub *mips16_stubs;
7947
7948 /* Build a call stub for a mips16 call. A stub is needed if we are
7949 passing any floating point values which should go into the floating
7950 point registers. If we are, and the call turns out to be to a 32
7951 bit function, the stub will be used to move the values into the
7952 floating point registers before calling the 32 bit function. The
7953 linker will magically adjust the function call to either the 16 bit
7954 function or the 32 bit stub, depending upon where the function call
7955 is actually defined.
7956
7957 Similarly, we need a stub if the return value might come back in a
7958 floating point register.
7959
7960 RETVAL is the location of the return value, or null if this is
7961 a call rather than a call_value. FN is the address of the
7962 function and ARG_SIZE is the size of the arguments. FP_CODE
7963 is the code built by function_arg. This function returns a nonzero
7964 value if it builds the call instruction itself. */
7965
7966 int
7967 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7968 {
7969 int fpret;
7970 const char *fnname;
7971 char *secname, *stubname;
7972 struct mips16_stub *l;
7973 tree stubid, stubdecl;
7974 int need_comma;
7975 unsigned int f;
7976
7977 /* We don't need to do anything if we aren't in mips16 mode, or if
7978 we were invoked with the -msoft-float option. */
7979 if (! TARGET_MIPS16 || ! mips16_hard_float)
7980 return 0;
7981
7982 /* Figure out whether the value might come back in a floating point
7983 register. */
7984 fpret = (retval != 0
7985 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7986 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7987
7988 /* We don't need to do anything if there were no floating point
7989 arguments and the value will not be returned in a floating point
7990 register. */
7991 if (fp_code == 0 && ! fpret)
7992 return 0;
7993
7994 /* We don't need to do anything if this is a call to a special
7995 mips16 support function. */
7996 if (GET_CODE (fn) == SYMBOL_REF
7997 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7998 return 0;
7999
8000 /* This code will only work for the o32 and o64 ABIs. The other
8001 ABIs require more sophisticated support. */
8002 gcc_assert (TARGET_OLDABI);
8003
8004 /* We can only handle SFmode and DFmode floating point return
8005 values. */
8006 if (fpret)
8007 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8008
8009 /* If we're calling via a function pointer, then we must always call
8010 via a stub. There are magic stubs provided in libgcc.a for each
8011 of the required cases. Each of them expects the function address
8012 to arrive in register $2. */
8013
8014 if (GET_CODE (fn) != SYMBOL_REF)
8015 {
8016 char buf[30];
8017 tree id;
8018 rtx stub_fn, insn;
8019
8020 /* ??? If this code is modified to support other ABIs, we need
8021 to handle PARALLEL return values here. */
8022
8023 sprintf (buf, "__mips16_call_stub_%s%d",
8024 (fpret
8025 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8026 : ""),
8027 fp_code);
8028 id = get_identifier (buf);
8029 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8030
8031 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8032
8033 if (retval == NULL_RTX)
8034 insn = gen_call_internal (stub_fn, arg_size);
8035 else
8036 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8037 insn = emit_call_insn (insn);
8038
8039 /* Put the register usage information on the CALL. */
8040 CALL_INSN_FUNCTION_USAGE (insn) =
8041 gen_rtx_EXPR_LIST (VOIDmode,
8042 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8043 CALL_INSN_FUNCTION_USAGE (insn));
8044
8045 /* If we are handling a floating point return value, we need to
8046 save $18 in the function prologue. Putting a note on the
8047 call will mean that regs_ever_live[$18] will be true if the
8048 call is not eliminated, and we can check that in the prologue
8049 code. */
8050 if (fpret)
8051 CALL_INSN_FUNCTION_USAGE (insn) =
8052 gen_rtx_EXPR_LIST (VOIDmode,
8053 gen_rtx_USE (VOIDmode,
8054 gen_rtx_REG (word_mode, 18)),
8055 CALL_INSN_FUNCTION_USAGE (insn));
8056
8057 /* Return 1 to tell the caller that we've generated the call
8058 insn. */
8059 return 1;
8060 }
8061
8062 /* We know the function we are going to call. If we have already
8063 built a stub, we don't need to do anything further. */
8064
8065 fnname = XSTR (fn, 0);
8066 for (l = mips16_stubs; l != NULL; l = l->next)
8067 if (strcmp (l->name, fnname) == 0)
8068 break;
8069
8070 if (l == NULL)
8071 {
8072 /* Build a special purpose stub. When the linker sees a
8073 function call in mips16 code, it will check where the target
8074 is defined. If the target is 32 bit code, the linker will
8075 search for the section defined here. It can tell which
8076 symbol this section is associated with by looking at the
8077 relocation information (the name is unreliable, since this
8078 might be a static function). If such a section is found, the
8079 linker will redirect the call to the start of the magic
8080 section.
8081
8082 If the function does not return a floating point value, the
8083 special stub section is named
8084 .mips16.call.FNNAME
8085
8086 If the function does return a floating point value, the stub
8087 section is named
8088 .mips16.call.fp.FNNAME
8089 */
8090
8091 secname = (char *) alloca (strlen (fnname) + 40);
8092 sprintf (secname, ".mips16.call.%s%s",
8093 fpret ? "fp." : "",
8094 fnname);
8095 stubname = (char *) alloca (strlen (fnname) + 20);
8096 sprintf (stubname, "__call_stub_%s%s",
8097 fpret ? "fp_" : "",
8098 fnname);
8099 stubid = get_identifier (stubname);
8100 stubdecl = build_decl (FUNCTION_DECL, stubid,
8101 build_function_type (void_type_node, NULL_TREE));
8102 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8103
8104 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8105 (fpret
8106 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
8107 : ""),
8108 fnname);
8109 need_comma = 0;
8110 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8111 {
8112 fprintf (asm_out_file, "%s%s",
8113 need_comma ? ", " : "",
8114 (f & 3) == 1 ? "float" : "double");
8115 need_comma = 1;
8116 }
8117 fprintf (asm_out_file, ")\n");
8118
8119 fprintf (asm_out_file, "\t.set\tnomips16\n");
8120 assemble_start_function (stubdecl, stubname);
8121
8122 if (!FUNCTION_NAME_ALREADY_DECLARED)
8123 {
8124 fputs ("\t.ent\t", asm_out_file);
8125 assemble_name (asm_out_file, stubname);
8126 fputs ("\n", asm_out_file);
8127
8128 assemble_name (asm_out_file, stubname);
8129 fputs (":\n", asm_out_file);
8130 }
8131
8132 /* We build the stub code by hand. That's the only way we can
8133 do it, since we can't generate 32 bit code during a 16 bit
8134 compilation. */
8135
8136 /* We don't want the assembler to insert any nops here. */
8137 fprintf (asm_out_file, "\t.set\tnoreorder\n");
8138
8139 mips16_fp_args (asm_out_file, fp_code, 0);
8140
8141 if (! fpret)
8142 {
8143 fprintf (asm_out_file, "\t.set\tnoat\n");
8144 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8145 fnname);
8146 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8147 fprintf (asm_out_file, "\t.set\tat\n");
8148 /* Unfortunately, we can't fill the jump delay slot. We
8149 can't fill it with one of the mtc1 instructions, because the
8150 result is not available for one instruction, so if the
8151 very first instruction in the function refers to the
8152 register, it will see the wrong value. */
8153 fprintf (asm_out_file, "\tnop\n");
8154 }
8155 else
8156 {
8157 fprintf (asm_out_file, "\tmove\t%s,%s\n",
8158 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8159 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8160 /* As above, we can't fill the delay slot. */
8161 fprintf (asm_out_file, "\tnop\n");
8162 if (GET_MODE (retval) == SFmode)
8163 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8164 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8165 else
8166 {
8167 if (TARGET_BIG_ENDIAN)
8168 {
8169 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8170 reg_names[GP_REG_FIRST + 2],
8171 reg_names[FP_REG_FIRST + 1]);
8172 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8173 reg_names[GP_REG_FIRST + 3],
8174 reg_names[FP_REG_FIRST + 0]);
8175 }
8176 else
8177 {
8178 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8179 reg_names[GP_REG_FIRST + 2],
8180 reg_names[FP_REG_FIRST + 0]);
8181 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8182 reg_names[GP_REG_FIRST + 3],
8183 reg_names[FP_REG_FIRST + 1]);
8184 }
8185 }
8186 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8187 /* As above, we can't fill the delay slot. */
8188 fprintf (asm_out_file, "\tnop\n");
8189 }
8190
8191 fprintf (asm_out_file, "\t.set\treorder\n");
8192
8193 #ifdef ASM_DECLARE_FUNCTION_SIZE
8194 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8195 #endif
8196
8197 if (!FUNCTION_NAME_ALREADY_DECLARED)
8198 {
8199 fputs ("\t.end\t", asm_out_file);
8200 assemble_name (asm_out_file, stubname);
8201 fputs ("\n", asm_out_file);
8202 }
8203
8204 fprintf (asm_out_file, "\t.set\tmips16\n");
8205
8206 /* Record this stub. */
8207 l = (struct mips16_stub *) xmalloc (sizeof *l);
8208 l->name = xstrdup (fnname);
8209 l->fpret = fpret;
8210 l->next = mips16_stubs;
8211 mips16_stubs = l;
8212 }
8213
8214 /* If we expect a floating point return value, but we've built a
8215 stub which does not expect one, then we're in trouble. We can't
8216 use the existing stub, because it won't handle the floating point
8217 value. We can't build a new stub, because the linker won't know
8218 which stub to use for the various calls in this object file.
8219 Fortunately, this case is illegal, since it means that a function
8220 was declared in two different ways in a single compilation. */
8221 if (fpret && ! l->fpret)
8222 error ("cannot handle inconsistent calls to %qs", fnname);
8223
8224 /* If we are calling a stub which handles a floating point return
8225 value, we need to arrange to save $18 in the prologue. We do
8226 this by marking the function call as using the register. The
8227 prologue will later see that it is used, and emit code to save
8228 it. */
8229
8230 if (l->fpret)
8231 {
8232 rtx insn;
8233
8234 if (retval == NULL_RTX)
8235 insn = gen_call_internal (fn, arg_size);
8236 else
8237 insn = gen_call_value_internal (retval, fn, arg_size);
8238 insn = emit_call_insn (insn);
8239
8240 CALL_INSN_FUNCTION_USAGE (insn) =
8241 gen_rtx_EXPR_LIST (VOIDmode,
8242 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8243 CALL_INSN_FUNCTION_USAGE (insn));
8244
8245 /* Return 1 to tell the caller that we've generated the call
8246 insn. */
8247 return 1;
8248 }
8249
8250 /* Return 0 to let the caller generate the call insn. */
8251 return 0;
8252 }
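
/* As an illustration of the stub text built above: for a call to a
   function "bar" that returns a float, the ".mips16.call.fp.bar" stub
   would contain roughly

	__call_stub_fp_bar:
		move	$18,$31
		jal	bar
		nop
		mfc1	$2,$f0
		j	$18
		nop

   so the caller's return address is preserved in $18 across the call and
   the result is moved from $f0 into $2, where mips16 code expects it.
   ("bar" is just a placeholder name.)  */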
8253
8254 /* An entry in the mips16 constant pool. VALUE is the pool constant,
8255 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
8256
8257 struct mips16_constant {
8258 struct mips16_constant *next;
8259 rtx value;
8260 rtx label;
8261 enum machine_mode mode;
8262 };
8263
8264 /* Information about an incomplete mips16 constant pool. FIRST is the
8265 first constant, HIGHEST_ADDRESS is the highest address that the first
8266 byte of the pool can have, and INSN_ADDRESS is the current instruction
8267 address. */
8268
8269 struct mips16_constant_pool {
8270 struct mips16_constant *first;
8271 int highest_address;
8272 int insn_address;
8273 };
8274
8275 /* Add constant VALUE to POOL and return its label. MODE is the
8276 value's mode (used for CONST_INTs, etc.). */
8277
8278 static rtx
8279 add_constant (struct mips16_constant_pool *pool,
8280 rtx value, enum machine_mode mode)
8281 {
8282 struct mips16_constant **p, *c;
8283 bool first_of_size_p;
8284
8285 /* See whether the constant is already in the pool. If so, return the
8286 existing label, otherwise leave P pointing to the place where the
8287 constant should be added.
8288
8289 Keep the pool sorted in increasing order of mode size so that we can
8290 reduce the number of alignments needed. */
8291 first_of_size_p = true;
8292 for (p = &pool->first; *p != 0; p = &(*p)->next)
8293 {
8294 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8295 return (*p)->label;
8296 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8297 break;
8298 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8299 first_of_size_p = false;
8300 }
8301
8302 /* In the worst case, the constant needed by the earliest instruction
8303 will end up at the end of the pool. The entire pool must then be
8304 accessible from that instruction.
8305
8306 When adding the first constant, set the pool's highest address to
8307 the address of the first out-of-range byte. Adjust this address
8308 downwards each time a new constant is added. */
8309 if (pool->first == 0)
8310 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8311 is the address of the instruction with the lowest two bits clear.
8312 The base PC value for ld has the lowest three bits clear. Assume
8313 the worst case here. */
8314 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8315 pool->highest_address -= GET_MODE_SIZE (mode);
8316 if (first_of_size_p)
8317 /* Take into account the worst possible padding due to alignment. */
8318 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
8319
8320 /* Create a new entry. */
8321 c = (struct mips16_constant *) xmalloc (sizeof *c);
8322 c->value = value;
8323 c->mode = mode;
8324 c->label = gen_label_rtx ();
8325 c->next = *p;
8326 *p = c;
8327
8328 return c->label;
8329 }
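
/* A worked example of the address bookkeeping above, assuming
   UNITS_PER_WORD == 4: if the first constant is an SImode value added
   when insn_address is 100, highest_address starts at
   100 - 2 + 0x8000 = 0x8062, drops by 4 for the constant itself and by
   a further 3 for worst-case alignment padding, giving 0x805b.  Each
   later constant lowers it again in the same way.  */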
8330
8331 /* Output constant VALUE after instruction INSN and return the last
8332 instruction emitted. MODE is the mode of the constant. */
8333
8334 static rtx
8335 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8336 {
8337 switch (GET_MODE_CLASS (mode))
8338 {
8339 case MODE_INT:
8340 {
8341 rtx size = GEN_INT (GET_MODE_SIZE (mode));
8342 return emit_insn_after (gen_consttable_int (value, size), insn);
8343 }
8344
8345 case MODE_FLOAT:
8346 return emit_insn_after (gen_consttable_float (value), insn);
8347
8348 case MODE_VECTOR_FLOAT:
8349 case MODE_VECTOR_INT:
8350 {
8351 int i;
8352 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8353 insn = dump_constants_1 (GET_MODE_INNER (mode),
8354 CONST_VECTOR_ELT (value, i), insn);
8355 return insn;
8356 }
8357
8358 default:
8359 gcc_unreachable ();
8360 }
8361 }
8362
8363
8364 /* Dump out the constants in CONSTANTS after INSN. */
8365
8366 static void
8367 dump_constants (struct mips16_constant *constants, rtx insn)
8368 {
8369 struct mips16_constant *c, *next;
8370 int align;
8371
8372 align = 0;
8373 for (c = constants; c != NULL; c = next)
8374 {
8375 /* If necessary, increase the alignment of PC. */
8376 if (align < GET_MODE_SIZE (c->mode))
8377 {
8378 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8379 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8380 }
8381 align = GET_MODE_SIZE (c->mode);
8382
8383 insn = emit_label_after (c->label, insn);
8384 insn = dump_constants_1 (c->mode, c->value, insn);
8385
8386 next = c->next;
8387 free (c);
8388 }
8389
8390 emit_barrier_after (insn);
8391 }
8392
8393 /* Return the length of instruction INSN. */
8394
8395 static int
8396 mips16_insn_length (rtx insn)
8397 {
8398 if (JUMP_P (insn))
8399 {
8400 rtx body = PATTERN (insn);
8401 if (GET_CODE (body) == ADDR_VEC)
8402 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8403 if (GET_CODE (body) == ADDR_DIFF_VEC)
8404 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8405 }
8406 return get_attr_length (insn);
8407 }
8408
8409 /* Rewrite *X so that constant pool references refer to the constant's
8410 label instead. DATA points to the constant pool structure. */
8411
8412 static int
8413 mips16_rewrite_pool_refs (rtx *x, void *data)
8414 {
8415 struct mips16_constant_pool *pool = data;
8416 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8417 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8418 get_pool_constant (*x),
8419 get_pool_mode (*x)));
8420 return 0;
8421 }
8422
8423 /* Build MIPS16 constant pools. */
8424
8425 static void
8426 mips16_lay_out_constants (void)
8427 {
8428 struct mips16_constant_pool pool;
8429 rtx insn, barrier;
8430
8431 barrier = 0;
8432 memset (&pool, 0, sizeof (pool));
8433 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8434 {
8435 /* Rewrite constant pool references in INSN. */
8436 if (INSN_P (insn))
8437 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8438
8439 pool.insn_address += mips16_insn_length (insn);
8440
8441 if (pool.first != NULL)
8442 {
8443 /* If there are no natural barriers between the first user of
8444 the pool and the highest acceptable address, we'll need to
8445 create a new instruction to jump around the constant pool.
8446 In the worst case, this instruction will be 4 bytes long.
8447
8448 If it's too late to do this transformation after INSN,
8449 do it immediately before INSN. */
8450 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8451 {
8452 rtx label, jump;
8453
8454 label = gen_label_rtx ();
8455
8456 jump = emit_jump_insn_before (gen_jump (label), insn);
8457 JUMP_LABEL (jump) = label;
8458 LABEL_NUSES (label) = 1;
8459 barrier = emit_barrier_after (jump);
8460
8461 emit_label_after (label, barrier);
8462 pool.insn_address += 4;
8463 }
8464
8465 /* See whether the constant pool is now out of range of the first
8466 user. If so, output the constants after the previous barrier.
8467 Note that any instructions between BARRIER and INSN (inclusive)
8468 will use negative offsets to refer to the pool. */
8469 if (pool.insn_address > pool.highest_address)
8470 {
8471 dump_constants (pool.first, barrier);
8472 pool.first = NULL;
8473 barrier = 0;
8474 }
8475 else if (BARRIER_P (insn))
8476 barrier = insn;
8477 }
8478 }
8479 dump_constants (pool.first, get_last_insn ());
8480 }
8481 \f
8482 /* A temporary variable used by for_each_rtx callbacks, etc. */
8483 static rtx mips_sim_insn;
8484
8485 /* A structure representing the state of the processor pipeline.
8486 Used by the mips_sim_* family of functions. */
8487 struct mips_sim {
8488 /* The maximum number of instructions that can be issued in a cycle.
8489 (Caches mips_issue_rate.) */
8490 unsigned int issue_rate;
8491
8492 /* The current simulation time. */
8493 unsigned int time;
8494
8495 /* How many more instructions can be issued in the current cycle. */
8496 unsigned int insns_left;
8497
8498 /* LAST_SET[X].INSN is the last instruction to set register X.
8499 LAST_SET[X].TIME is the time at which that instruction was issued.
8500 INSN is null if no instruction has yet set register X. */
8501 struct {
8502 rtx insn;
8503 unsigned int time;
8504 } last_set[FIRST_PSEUDO_REGISTER];
8505
8506 /* The pipeline's current DFA state. */
8507 state_t dfa_state;
8508 };
8509
8510 /* Reset STATE to the initial simulation state. */
8511
8512 static void
8513 mips_sim_reset (struct mips_sim *state)
8514 {
8515 state->time = 0;
8516 state->insns_left = state->issue_rate;
8517 memset (&state->last_set, 0, sizeof (state->last_set));
8518 state_reset (state->dfa_state);
8519 }
8520
8521 /* Initialize STATE before its first use. DFA_STATE points to an
8522 allocated but uninitialized DFA state. */
8523
8524 static void
8525 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8526 {
8527 state->issue_rate = mips_issue_rate ();
8528 state->dfa_state = dfa_state;
8529 mips_sim_reset (state);
8530 }
8531
8532 /* Advance STATE by one clock cycle. */
8533
8534 static void
8535 mips_sim_next_cycle (struct mips_sim *state)
8536 {
8537 state->time++;
8538 state->insns_left = state->issue_rate;
8539 state_transition (state->dfa_state, 0);
8540 }
8541
8542 /* Advance simulation state STATE until instruction INSN can read
8543 register REG. */
8544
8545 static void
8546 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8547 {
8548 unsigned int i;
8549
8550 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8551 if (state->last_set[REGNO (reg) + i].insn != 0)
8552 {
8553 unsigned int t;
8554
8555 t = state->last_set[REGNO (reg) + i].time;
8556 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8557 while (state->time < t)
8558 mips_sim_next_cycle (state);
8559 }
8560 }
8561
8562 /* A for_each_rtx callback. If *X is a register, advance simulation state
8563 DATA until mips_sim_insn can read the register's value. */
8564
8565 static int
8566 mips_sim_wait_regs_2 (rtx *x, void *data)
8567 {
8568 if (REG_P (*x))
8569 mips_sim_wait_reg (data, mips_sim_insn, *x);
8570 return 0;
8571 }
8572
8573 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8574
8575 static void
8576 mips_sim_wait_regs_1 (rtx *x, void *data)
8577 {
8578 for_each_rtx (x, mips_sim_wait_regs_2, data);
8579 }
8580
8581 /* Advance simulation state STATE until all of INSN's register
8582 dependencies are satisfied. */
8583
8584 static void
8585 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8586 {
8587 mips_sim_insn = insn;
8588 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8589 }
8590
8591 /* Advance simulation state STATE until the units required by
8592 instruction INSN are available. */
8593
8594 static void
8595 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8596 {
8597 state_t tmp_state;
8598
8599 tmp_state = alloca (state_size ());
8600 while (state->insns_left == 0
8601 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8602 state_transition (tmp_state, insn) >= 0))
8603 mips_sim_next_cycle (state);
8604 }
8605
8606 /* Advance simulation state STATE until INSN is ready to issue. */
8607
8608 static void
8609 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8610 {
8611 mips_sim_wait_regs (state, insn);
8612 mips_sim_wait_units (state, insn);
8613 }
8614
8615 /* mips_sim_insn has just set X. Update the LAST_SET array
8616 in simulation state DATA. */
8617
8618 static void
8619 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8620 {
8621 struct mips_sim *state;
8622 unsigned int i;
8623
8624 state = data;
8625 if (REG_P (x))
8626 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8627 {
8628 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8629 state->last_set[REGNO (x) + i].time = state->time;
8630 }
8631 }
8632
8633 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8634 can issue immediately (i.e., that mips_sim_wait_insn has already
8635 been called). */
8636
8637 static void
8638 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8639 {
8640 state_transition (state->dfa_state, insn);
8641 state->insns_left--;
8642
8643 mips_sim_insn = insn;
8644 note_stores (PATTERN (insn), mips_sim_record_set, state);
8645 }
8646
8647 /* Simulate issuing a NOP in state STATE. */
8648
8649 static void
8650 mips_sim_issue_nop (struct mips_sim *state)
8651 {
8652 if (state->insns_left == 0)
8653 mips_sim_next_cycle (state);
8654 state->insns_left--;
8655 }
8656
8657 /* Update simulation state STATE so that it's ready to accept the instruction
8658 after INSN. INSN should be part of the main rtl chain, not a member of a
8659 SEQUENCE. */
8660
8661 static void
8662 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8663 {
8664 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8665 if (JUMP_P (insn))
8666 mips_sim_issue_nop (state);
8667
8668 switch (GET_CODE (SEQ_BEGIN (insn)))
8669 {
8670 case CODE_LABEL:
8671 case CALL_INSN:
8672 /* We can't predict the processor state after a call or label. */
8673 mips_sim_reset (state);
8674 break;
8675
8676 case JUMP_INSN:
8677 /* The delay slots of branch likely instructions are only executed
8678 when the branch is taken. Therefore, if the caller has simulated
8679 the delay slot instruction, STATE does not really reflect the state
8680 of the pipeline for the instruction after the delay slot. Also,
8681 branch likely instructions tend to incur a penalty when not taken,
8682 so there will probably be an extra delay between the branch and
8683 the instruction after the delay slot. */
8684 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8685 mips_sim_reset (state);
8686 break;
8687
8688 default:
8689 break;
8690 }
8691 }
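
/* A minimal sketch of how the mips_sim_* routines above are driven; the
   real client is vr4130_align_insns below, which interleaves this loop
   with its alignment logic:

	struct mips_sim state;
	rtx insn, subinsn;

	dfa_start ();
	mips_sim_init (&state, alloca (state_size ()));
	for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	  {
	    if (USEFUL_INSN_P (insn))
	      FOR_EACH_SUBINSN (subinsn, insn)
		{
		  mips_sim_wait_insn (&state, subinsn);
		  mips_sim_issue_insn (&state, subinsn);
		}
	    mips_sim_finish_insn (&state, insn);
	  }
	dfa_finish ();
*/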
8692 \f
8693 /* The VR4130 pipeline issues aligned pairs of instructions together,
8694 but it stalls the second instruction if it depends on the first.
8695 In order to cut down the amount of logic required, this dependence
8696 check is not based on a full instruction decode. Instead, any non-SPECIAL
8697 instruction is assumed to modify the register specified by bits 20-16
8698 (which is usually the "rt" field).
8699
8700 In beq, beql, bne and bnel instructions, the rt field is actually an
8701 input, so we can end up with a false dependence between the branch
8702 and its delay slot. If this situation occurs in instruction INSN,
8703 try to avoid it by swapping rs and rt. */
8704
8705 static void
8706 vr4130_avoid_branch_rt_conflict (rtx insn)
8707 {
8708 rtx first, second;
8709
8710 first = SEQ_BEGIN (insn);
8711 second = SEQ_END (insn);
8712 if (JUMP_P (first)
8713 && NONJUMP_INSN_P (second)
8714 && GET_CODE (PATTERN (first)) == SET
8715 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8716 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8717 {
8718 /* Check for the right kind of condition. */
8719 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8720 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8721 && REG_P (XEXP (cond, 0))
8722 && REG_P (XEXP (cond, 1))
8723 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8724 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8725 {
8726 /* SECOND mentions the rt register but not the rs register. */
8727 rtx tmp = XEXP (cond, 0);
8728 XEXP (cond, 0) = XEXP (cond, 1);
8729 XEXP (cond, 1) = tmp;
8730 }
8731 }
8732 }
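
/* For example (a hand-written illustration, not compiler output), in

	beq	$4,$5,1f
	 addiu	$5,$5,1

   the delay slot reads $5, which sits in the branch's rt field, so the
   VR4130 would see a false dependence.  Swapping the condition operands
   gives "beq $5,$4,1f", which puts $5 in the rs field instead.  */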
8733
8734 /* Implement -mvr4130-align. Go through each basic block and simulate the
8735 processor pipeline. If we find that a pair of instructions could execute
8736 in parallel, and the first of those instructions is not 8-byte aligned,
8737 insert a nop to make it aligned. */
8738
8739 static void
8740 vr4130_align_insns (void)
8741 {
8742 struct mips_sim state;
8743 rtx insn, subinsn, last, last2, next;
8744 bool aligned_p;
8745
8746 dfa_start ();
8747
8748 /* LAST is the last instruction before INSN to have a nonzero length.
8749 LAST2 is the last such instruction before LAST. */
8750 last = 0;
8751 last2 = 0;
8752
8753 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8754 aligned_p = true;
8755
8756 mips_sim_init (&state, alloca (state_size ()));
8757 for (insn = get_insns (); insn != 0; insn = next)
8758 {
8759 unsigned int length;
8760
8761 next = NEXT_INSN (insn);
8762
8763 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8764 This isn't really related to the alignment pass, but we do it on
8765 the fly to avoid a separate instruction walk. */
8766 vr4130_avoid_branch_rt_conflict (insn);
8767
8768 if (USEFUL_INSN_P (insn))
8769 FOR_EACH_SUBINSN (subinsn, insn)
8770 {
8771 mips_sim_wait_insn (&state, subinsn);
8772
8773 /* If we want this instruction to issue in parallel with the
8774 previous one, make sure that the previous instruction is
8775 aligned. There are several reasons why this isn't worthwhile
8776 when the second instruction is a call:
8777
8778 - Calls are less likely to be performance critical.
8779 - There's a good chance that the delay slot can execute
8780 in parallel with the call.
8781 - The return address would then be unaligned.
8782
8783 In general, if we're going to insert a nop between instructions
8784 X and Y, it's better to insert it immediately after X. That
8785 way, if the nop makes Y aligned, it will also align any labels
8786 between X and Y. */
8787 if (state.insns_left != state.issue_rate
8788 && !CALL_P (subinsn))
8789 {
8790 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8791 {
8792 /* SUBINSN is the first instruction in INSN and INSN is
8793 aligned. We want to align the previous instruction
8794 instead, so insert a nop between LAST2 and LAST.
8795
8796 Note that LAST could be either a single instruction
8797 or a branch with a delay slot. In the latter case,
8798 LAST, like INSN, is already aligned, but the delay
8799 slot must have some extra delay that stops it from
8800 issuing at the same time as the branch. We therefore
8801 insert a nop before the branch in order to align its
8802 delay slot. */
8803 emit_insn_after (gen_nop (), last2);
8804 aligned_p = false;
8805 }
8806 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8807 {
8808 /* SUBINSN is the delay slot of INSN, but INSN is
8809 currently unaligned. Insert a nop between
8810 LAST and INSN to align it. */
8811 emit_insn_after (gen_nop (), last);
8812 aligned_p = true;
8813 }
8814 }
8815 mips_sim_issue_insn (&state, subinsn);
8816 }
8817 mips_sim_finish_insn (&state, insn);
8818
8819 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8820 length = get_attr_length (insn);
8821 if (length > 0)
8822 {
8823 /* If the instruction is an asm statement or multi-instruction
8824 mips.md pattern, the length is only an estimate. Insert an
8825 8-byte alignment after it so that the following instructions
8826 can be handled correctly. */
8827 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8828 && (recog_memoized (insn) < 0 || length >= 8))
8829 {
8830 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8831 next = NEXT_INSN (next);
8832 mips_sim_next_cycle (&state);
8833 aligned_p = true;
8834 }
8835 else if (length & 4)
8836 aligned_p = !aligned_p;
8837 last2 = last;
8838 last = insn;
8839 }
8840
8841 /* See whether INSN is an aligned label. */
8842 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8843 aligned_p = true;
8844 }
8845 dfa_finish ();
8846 }
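
/* A hypothetical illustration (made-up addresses and instructions):

	0x00:	addu	$2,$4,$5
	0x04:	addu	$3,$6,$7	# could pair with the next insn
	0x08:	subu	$8,$2,$3

   The pass assumes the pair starting at 0x04 cannot dual-issue because
   its first member is not 8-byte aligned, so it emits a nop after the
   instruction at 0x00; the pair then occupies 0x08 and 0x0c, with its
   first member aligned.  */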
8847 \f
8848 /* Subroutine of mips_reorg. If there is a hazard between INSN
8849 and a previous instruction, avoid it by inserting nops after
8850 instruction AFTER.
8851
8852 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8853 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8854 before using the value of that register. *HILO_DELAY counts the
8855 number of instructions since the last hilo hazard (that is,
8856 the number of instructions since the last mflo or mfhi).
8857
8858 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8859 for the next instruction.
8860
8861 LO_REG is an rtx for the LO register, used in dependence checking. */
8862
8863 static void
8864 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8865 rtx *delayed_reg, rtx lo_reg)
8866 {
8867 rtx pattern, set;
8868 int nops, ninsns;
8869
8870 if (!INSN_P (insn))
8871 return;
8872
8873 pattern = PATTERN (insn);
8874
8875 /* Do not put the whole function in .set noreorder if it contains
8876 an asm statement. We don't know whether there will be hazards
8877 between the asm statement and the gcc-generated code. */
8878 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8879 cfun->machine->all_noreorder_p = false;
8880
8881 /* Ignore zero-length instructions (barriers and the like). */
8882 ninsns = get_attr_length (insn) / 4;
8883 if (ninsns == 0)
8884 return;
8885
8886 /* Work out how many nops are needed. Note that we only care about
8887 registers that are explicitly mentioned in the instruction's pattern.
8888 It doesn't matter that calls use the argument registers or that they
8889 clobber hi and lo. */
8890 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8891 nops = 2 - *hilo_delay;
8892 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8893 nops = 1;
8894 else
8895 nops = 0;
8896
8897 /* Insert the nops between this instruction and the previous one.
8898 Each new nop takes us further from the last hilo hazard. */
8899 *hilo_delay += nops;
8900 while (nops-- > 0)
8901 emit_insn_after (gen_hazard_nop (), after);
8902
8903 /* Set up the state for the next instruction. */
8904 *hilo_delay += ninsns;
8905 *delayed_reg = 0;
8906 if (INSN_CODE (insn) >= 0)
8907 switch (get_attr_hazard (insn))
8908 {
8909 case HAZARD_NONE:
8910 break;
8911
8912 case HAZARD_HILO:
8913 *hilo_delay = 0;
8914 break;
8915
8916 case HAZARD_DELAY:
8917 set = single_set (insn);
8918 gcc_assert (set != 0);
8919 *delayed_reg = SET_DEST (set);
8920 break;
8921 }
8922 }
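
/* A hypothetical illustration of the two kinds of hazard (made-up
   code; exactly which instructions carry which hazard depends on the
   ISA and the -mfix-* options):

	mflo	$2		# HAZARD_HILO: resets *HILO_DELAY to 0
	nop			# two instructions must pass before
	nop			# HI/LO may be written again ...
	mult	$4,$5		# ... so two hazard nops are inserted

	lw	$2,0($sp)	# HAZARD_DELAY on ISAs with load delays
	nop			# one nop before the result is read
	addu	$3,$2,$2  */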
8923
8924
8925 /* Go through the instruction stream and insert nops where necessary.
8926 See if the whole function can then be put into .set noreorder &
8927 .set nomacro. */
8928
8929 static void
8930 mips_avoid_hazards (void)
8931 {
8932 rtx insn, last_insn, lo_reg, delayed_reg;
8933 int hilo_delay, i;
8934
8935 /* Force all instructions to be split into their final form. */
8936 split_all_insns_noflow ();
8937
8938 /* Recalculate instruction lengths without taking nops into account. */
8939 cfun->machine->ignore_hazard_length_p = true;
8940 shorten_branches (get_insns ());
8941
8942 cfun->machine->all_noreorder_p = true;
8943
8944 /* Profiled functions can't be all noreorder because the profiler
8945 support uses assembler macros. */
8946 if (current_function_profile)
8947 cfun->machine->all_noreorder_p = false;
8948
8949 /* Code compiled with -mfix-vr4120 can't be all noreorder because
8950 we rely on the assembler to work around some errata. */
8951 if (TARGET_FIX_VR4120)
8952 cfun->machine->all_noreorder_p = false;
8953
8954 /* The same is true for -mfix-vr4130 if we might generate mflo or
8955 mfhi instructions. Note that we avoid using mflo and mfhi if
8956 the VR4130 macc and dmacc instructions are available instead;
8957 see the *mfhilo_{si,di}_macc patterns. */
8958 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
8959 cfun->machine->all_noreorder_p = false;
8960
8961 last_insn = 0;
8962 hilo_delay = 2;
8963 delayed_reg = 0;
8964 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8965
8966 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
8967 if (INSN_P (insn))
8968 {
8969 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8970 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8971 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8972 &hilo_delay, &delayed_reg, lo_reg);
8973 else
8974 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8975 &delayed_reg, lo_reg);
8976
8977 last_insn = insn;
8978 }
8979 }
8980
8981
8982 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
8983
8984 static void
8985 mips_reorg (void)
8986 {
8987 if (TARGET_MIPS16)
8988 mips16_lay_out_constants ();
8989 else if (TARGET_EXPLICIT_RELOCS)
8990 {
8991 if (mips_flag_delayed_branch)
8992 dbr_schedule (get_insns ());
8993 mips_avoid_hazards ();
8994 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8995 vr4130_align_insns ();
8996 }
8997 }
8998
8999 /* This function does three things:
9000
9001 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9002 - Register the mips16 hardware floating point stubs.
9003 - Register the gofast functions if selected using --enable-gofast. */
9004
9005 #include "config/gofast.h"
9006
9007 static void
9008 mips_init_libfuncs (void)
9009 {
9010 if (TARGET_FIX_VR4120)
9011 {
9012 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9013 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9014 }
9015
9016 if (TARGET_MIPS16 && mips16_hard_float)
9017 {
9018 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9019 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9020 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9021 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9022
9023 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9024 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9025 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9026 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9027 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9028 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9029
9030 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9031 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9032
9033 if (TARGET_DOUBLE_FLOAT)
9034 {
9035 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9036 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9037 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9038 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9039
9040 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9041 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9042 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9043 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9044 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9045 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9046
9047 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9048 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9049
9050 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9051 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9052 }
9053 }
9054 else
9055 gofast_maybe_init_libfuncs ();
9056 }
9057
9058 /* Return a number assessing the cost of moving a register in class
9059 FROM to class TO. The classes are expressed using the enumeration
9060 values such as `GENERAL_REGS'. A value of 2 is the default; other
9061 values are interpreted relative to that.
9062
9063 It is not required that the cost always equal 2 when FROM is the
9064 same as TO; on some machines it is expensive to move between
9065 registers if they are not general registers.
9066
9067 If reload sees an insn consisting of a single `set' between two
9068 hard registers, and if `REGISTER_MOVE_COST' applied to their
9069 classes returns a value of 2, reload does not check to ensure that
9070 the constraints of the insn are met. Setting a cost of other than
9071 2 will allow reload to verify that the constraints are met. You
9072 should do this if the `movM' pattern's constraints do not allow
9073 such copying.
9074
9075 ??? We make the cost of moving from HI/LO into general
9076 registers the same as for one of moving general registers to
9077 HI/LO for TARGET_MIPS16 in order to prevent allocating a
9078 pseudo to HI/LO. This might hurt optimizations, though; it
9079 isn't clear whether it is wise. And it might not work in all cases. We
9080 could solve the DImode LO reg problem by using a multiply, just
9081 like reload_{in,out}si. We could solve the SImode/HImode HI reg
9082 problem by using divide instructions. divu puts the remainder in
9083 the HI reg, so doing a divide by -1 will move the value in the HI
9084 reg for all values except -1. We could handle that case by using a
9085 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
9086 a compare/branch to test the input value to see which instruction
9087 we need to use. This gets pretty messy, but it is feasible. */
9088
9089 int
9090 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9091 enum reg_class to, enum reg_class from)
9092 {
9093 if (from == M16_REGS && GR_REG_CLASS_P (to))
9094 return 2;
9095 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9096 return 2;
9097 else if (GR_REG_CLASS_P (from))
9098 {
9099 if (to == M16_REGS)
9100 return 2;
9101 else if (to == M16_NA_REGS)
9102 return 2;
9103 else if (GR_REG_CLASS_P (to))
9104 {
9105 if (TARGET_MIPS16)
9106 return 4;
9107 else
9108 return 2;
9109 }
9110 else if (to == FP_REGS)
9111 return 4;
9112 else if (reg_class_subset_p (to, ACC_REGS))
9113 {
9114 if (TARGET_MIPS16)
9115 return 12;
9116 else
9117 return 6;
9118 }
9119 else if (COP_REG_CLASS_P (to))
9120 {
9121 return 5;
9122 }
9123 }
9124 else if (from == FP_REGS)
9125 {
9126 if (GR_REG_CLASS_P (to))
9127 return 4;
9128 else if (to == FP_REGS)
9129 return 2;
9130 else if (to == ST_REGS)
9131 return 8;
9132 }
9133 else if (reg_class_subset_p (from, ACC_REGS))
9134 {
9135 if (GR_REG_CLASS_P (to))
9136 {
9137 if (TARGET_MIPS16)
9138 return 12;
9139 else
9140 return 6;
9141 }
9142 }
9143 else if (from == ST_REGS && GR_REG_CLASS_P (to))
9144 return 4;
9145 else if (COP_REG_CLASS_P (from))
9146 {
9147 return 5;
9148 }
9149
9150 /* Fall through.
9151 ??? What cases are these? Shouldn't we return 2 here? */
9152
9153 return 12;
9154 }
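
/* In summary, the values chosen above are: 2 for moves within the
   general registers (4 between full GPR classes in MIPS16 mode),
   4 for GPR<->FPR moves, 6 for GPR<->accumulator moves (12 in MIPS16
   mode), 2 for FPR<->FPR, 8 for FPR->ST_REGS, 4 for ST_REGS->GPR,
   5 for moves involving coprocessor registers, and a conservative 12
   for anything that falls through to the end.  */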
9155
9156 /* Return the length of INSN. LENGTH is the initial length computed by
9157 attributes in the machine-description file. */
9158
9159 int
9160 mips_adjust_insn_length (rtx insn, int length)
9161 {
9162 /* An unconditional jump has an unfilled delay slot if it is not part
9163 of a sequence. A conditional jump normally has a delay slot, but
9164 does not on MIPS16. */
9165 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9166 length += 4;
9167
9168 /* See how many nops might be needed to avoid hardware hazards. */
9169 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9170 switch (get_attr_hazard (insn))
9171 {
9172 case HAZARD_NONE:
9173 break;
9174
9175 case HAZARD_DELAY:
9176 length += 4;
9177 break;
9178
9179 case HAZARD_HILO:
9180 length += 8;
9181 break;
9182 }
9183
9184 /* All MIPS16 instructions are a measly two bytes. */
9185 if (TARGET_MIPS16)
9186 length /= 2;
9187
9188 return length;
9189 }
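
/* A worked example (assuming no MIPS16 and no pending hazards): a
   conditional branch whose .md length is 4 gets 4 bytes added for its
   delay slot, giving 8; if it also had a HAZARD_HILO hazard pending,
   another 8 bytes of nops would be counted, giving 16.  On MIPS16 the
   final figure is halved to account for the 2-byte encoding.  */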
9190
9191
9192 /* Return an asm sequence to start a noat block and load the address
9193 of a label into $1. */
9194
9195 const char *
9196 mips_output_load_label (void)
9197 {
9198 if (TARGET_EXPLICIT_RELOCS)
9199 switch (mips_abi)
9200 {
9201 case ABI_N32:
9202 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9203
9204 case ABI_64:
9205 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9206
9207 default:
9208 if (ISA_HAS_LOAD_DELAY)
9209 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9210 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9211 }
9212 else
9213 {
9214 if (Pmode == DImode)
9215 return "%[dla\t%@,%0";
9216 else
9217 return "%[la\t%@,%0";
9218 }
9219 }
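
/* As a rough sketch (not verbatim output), the template returned for
   a 32-bit ABI with -mexplicit-relocs expands to something like:

	.set	noat
	lw	$1,%got(label)($gp)
	addiu	$1,$1,%lo(label)

   with an extra nop after the lw on ISAs that have load delay slots.
   Without explicit relocs it is just ".set noat; la $1,label" (dla
   for 64-bit pointers), and the matching ".set at" is emitted by the
   caller.  */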
9220
9221 /* Return the assembly code for INSN, which has the operands given by
9222 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9223 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9224 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9225 version of BRANCH_IF_TRUE. */
9226
9227 const char *
9228 mips_output_conditional_branch (rtx insn, rtx *operands,
9229 const char *branch_if_true,
9230 const char *branch_if_false)
9231 {
9232 unsigned int length;
9233 rtx taken, not_taken;
9234
9235 length = get_attr_length (insn);
9236 if (length <= 8)
9237 {
9238 /* Just a simple conditional branch. */
9239 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9240 return branch_if_true;
9241 }
9242
9243 /* Generate a reversed branch around a direct jump. This fallback does
9244 not use branch-likely instructions. */
9245 mips_branch_likely = false;
9246 not_taken = gen_label_rtx ();
9247 taken = operands[1];
9248
9249 /* Generate the reversed branch to NOT_TAKEN. */
9250 operands[1] = not_taken;
9251 output_asm_insn (branch_if_false, operands);
9252
9253 /* If INSN has a delay slot, we must provide delay slots for both the
9254 branch to NOT_TAKEN and the conditional jump. We must also ensure
9255 that INSN's delay slot is executed in the appropriate cases. */
9256 if (final_sequence)
9257 {
9258 /* This first delay slot will always be executed, so use INSN's
9259 delay slot if it is not annulled. */
9260 if (!INSN_ANNULLED_BRANCH_P (insn))
9261 {
9262 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9263 asm_out_file, optimize, 1, NULL);
9264 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9265 }
9266 else
9267 output_asm_insn ("nop", 0);
9268 fprintf (asm_out_file, "\n");
9269 }
9270
9271 /* Output the unconditional branch to TAKEN. */
9272 if (length <= 16)
9273 output_asm_insn ("j\t%0%/", &taken);
9274 else
9275 {
9276 output_asm_insn (mips_output_load_label (), &taken);
9277 output_asm_insn ("jr\t%@%]%/", 0);
9278 }
9279
9280 /* Now deal with its delay slot; see above. */
9281 if (final_sequence)
9282 {
9283 /* This delay slot will only be executed if the branch is taken.
9284 Use INSN's delay slot if it is annulled. */
9285 if (INSN_ANNULLED_BRANCH_P (insn))
9286 {
9287 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9288 asm_out_file, optimize, 1, NULL);
9289 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9290 }
9291 else
9292 output_asm_insn ("nop", 0);
9293 fprintf (asm_out_file, "\n");
9294 }
9295
9296 /* Output NOT_TAKEN. */
9297 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9298 CODE_LABEL_NUMBER (not_taken));
9299 return "";
9300 }
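
/* A sketch of the long-branch expansion above: if "beq $4,$5,foo"
   cannot reach its target, it is output roughly as

	bne	$4,$5,1f	# reversed branch around the jump
	 nop			# (or INSN's own delay-slot insn)
	j	foo
	 nop
   1:

   with the load-label/jr $1 form replacing the "j" when even a jump
   cannot reach the target.  The label is really an internal L-label
   rather than "1:"; this is only an illustration.  */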
9301
9302 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9303 if some ordered condition is true. The condition is given by
9304 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9305 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9306 its second is always zero. */
9307
9308 const char *
9309 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9310 {
9311 const char *branch[2];
9312
9313 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9314 Make BRANCH[0] branch on the inverse condition. */
9315 switch (GET_CODE (operands[0]))
9316 {
9317 /* These cases are equivalent to comparisons against zero. */
9318 case LEU:
9319 inverted_p = !inverted_p;
9320 /* Fall through. */
9321 case GTU:
9322 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9323 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9324 break;
9325
9326 /* These cases are always true or always false. */
9327 case LTU:
9328 inverted_p = !inverted_p;
9329 /* Fall through. */
9330 case GEU:
9331 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9332 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9333 break;
9334
9335 default:
9336 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9337 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9338 break;
9339 }
9340 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9341 }
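
/* Example mappings from the switch above (illustrative only):
   "unsigned > 0" is the same as "!= 0" and becomes "bne $x,$0,label";
   "unsigned >= 0" is always true and becomes "beq $0,$0,label";
   signed orderings use the compare-with-zero branches, e.g. GT
   becomes "bgtz $x,label".  */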
9342 \f
9343 /* Used to output div or ddiv instruction DIVISION, which has the operands
9344 given by OPERANDS. Add in a divide-by-zero check if needed.
9345
9346 When working around R4000 and R4400 errata, we need to make sure that
9347 the division is not immediately followed by a shift[1][2]. We also
9348 need to stop the division from being put into a branch delay slot[3].
9349 The easiest way to avoid both problems is to add a nop after the
9350 division. When a divide-by-zero check is needed, this nop can be
9351 used to fill the branch delay slot.
9352
9353 [1] If a double-word or a variable shift executes immediately
9354 after starting an integer division, the shift may give an
9355 incorrect result. See quotations of errata #16 and #28 from
9356 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9357 in mips.md for details.
9358
9359 [2] A similar bug to [1] exists for all revisions of the
9360 R4000 and the R4400 when run in an MC configuration.
9361 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9362
9363 "19. In this following sequence:
9364
9365 ddiv (or ddivu or div or divu)
9366 dsll32 (or dsrl32, dsra32)
9367
9368 if an MPT stall occurs, while the divide is slipping the cpu
9369 pipeline, then the following double shift would end up with an
9370 incorrect result.
9371
9372 Workaround: The compiler needs to avoid generating any
9373 sequence with divide followed by extended double shift."
9374
9375 This erratum is also present in "MIPS R4400MC Errata, Processor
9376 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9377 & 3.0" as errata #10 and #4, respectively.
9378
9379 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9380 (also valid for MIPS R4000MC processors):
9381
9382 "52. R4000SC: This bug does not apply for the R4000PC.
9383
9384 There are two flavors of this bug:
9385
9386 1) If the instruction just after divide takes an RF exception
9387 (tlb-refill, tlb-invalid) and gets an instruction cache
9388 miss (both primary and secondary) and the line which is
9389 currently in secondary cache at this index had the first
9390 data word, where the bits 5..2 are set, then R4000 would
9391 get a wrong result for the div.
9392
9393 ##1
9394 nop
9395 div r8, r9
9396 ------------------- # end-of page. -tlb-refill
9397 nop
9398 ##2
9399 nop
9400 div r8, r9
9401 ------------------- # end-of page. -tlb-invalid
9402 nop
9403
9404 2) If the divide is in the taken branch delay slot, where the
9405 target takes RF exception and gets an I-cache miss for the
9406 exception vector or where I-cache miss occurs for the
9407 target address, under the above mentioned scenarios, the
9408 div would get wrong results.
9409
9410 ##1
9411 j r2 # to next page mapped or unmapped
9412 div r8,r9 # this bug would be there as long
9413 # as there is an ICache miss and
9414 nop # the "data pattern" is present
9415
9416 ##2
9417 beq r0, r0, NextPage # to Next page
9418 div r8,r9
9419 nop
9420
9421 This bug is present for div, divu, ddiv, and ddivu
9422 instructions.
9423
9424 Workaround: For item 1), OS could make sure that the next page
9425 after the divide instruction is also mapped. For item 2), the
9426 compiler could make sure that the divide instruction is not in
9427 the branch delay slot."
9428
9429 These processors have PRId values of 0x00004220 and 0x00004300 for
9430 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9431
9432 const char *
9433 mips_output_division (const char *division, rtx *operands)
9434 {
9435 const char *s;
9436
9437 s = division;
9438 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9439 {
9440 output_asm_insn (s, operands);
9441 s = "nop";
9442 }
9443 if (TARGET_CHECK_ZERO_DIV)
9444 {
9445 if (TARGET_MIPS16)
9446 {
9447 output_asm_insn (s, operands);
9448 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9449 }
9450 else if (GENERATE_DIVIDE_TRAPS)
9451 {
9452 output_asm_insn (s, operands);
9453 s = "teq\t%2,%.,7";
9454 }
9455 else
9456 {
9457 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9458 output_asm_insn (s, operands);
9459 s = "break\t7%)\n1:";
9460 }
9461 }
9462 return s;
9463 }
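
/* A sketch of the output with -mcheck-zero-division (hypothetical
   registers, $5 being the divisor):

	div	$0,$4,$5
	teq	$5,$0,7		# trap with code 7 if $5 is zero

   on targets with trap instructions, and otherwise

	.set	noreorder
	bne	$5,$0,1f
	div	$0,$4,$5	# division fills the delay slot
	break	7
	.set	reorder
   1:

   When also working around the R4000/R4400 errata, the division is
   emitted first and a nop goes into the branch delay slot instead.  */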
9464 \f
9465 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9466 with a final "000" replaced by "k". Ignore case.
9467
9468 Note: this function is shared between GCC and GAS. */
9469
9470 static bool
9471 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9472 {
9473 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9474 given++, canonical++;
9475
9476 return ((*given == 0 && *canonical == 0)
9477 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9478 }
9479
9480
9481 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9482 CPU name. We've traditionally allowed a lot of variation here.
9483
9484 Note: this function is shared between GCC and GAS. */
9485
9486 static bool
9487 mips_matching_cpu_name_p (const char *canonical, const char *given)
9488 {
9489 /* First see if the name matches exactly, or with a final "000"
9490 turned into "k". */
9491 if (mips_strict_matching_cpu_name_p (canonical, given))
9492 return true;
9493
9494 /* If not, try comparing based on numerical designation alone.
9495 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9496 if (TOLOWER (*given) == 'r')
9497 given++;
9498 if (!ISDIGIT (*given))
9499 return false;
9500
9501 /* Skip over some well-known prefixes in the canonical name,
9502 hoping to find a number there too. */
9503 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9504 canonical += 2;
9505 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9506 canonical += 2;
9507 else if (TOLOWER (canonical[0]) == 'r')
9508 canonical += 1;
9509
9510 return mips_strict_matching_cpu_name_p (canonical, given);
9511 }
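
/* Examples of the matching above (assuming the usual table entries):
   "r4000", "4000" and "r4k" all match the canonical name "r4000";
   "4111" and "r4111" match "vr4111"; "7000" matches "rm7000".  The
   comparison is case-insensitive, although upper-case names are
   warned about in mips_parse_cpu.  */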
9512
9513
9514 /* Return the mips_cpu_info entry for the processor or ISA given
9515 by CPU_STRING. Return null if the string isn't recognized.
9516
9517 A similar function exists in GAS. */
9518
9519 static const struct mips_cpu_info *
9520 mips_parse_cpu (const char *cpu_string)
9521 {
9522 const struct mips_cpu_info *p;
9523 const char *s;
9524
9525 /* In the past, we allowed upper-case CPU names, but it doesn't
9526 work well with the multilib machinery. */
9527 for (s = cpu_string; *s != 0; s++)
9528 if (ISUPPER (*s))
9529 {
9530 warning (0, "the cpu name must be lower case");
9531 break;
9532 }
9533
9534 /* 'from-abi' selects the most compatible architecture for the given
9535 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9536 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9537 version. Look first at the -mgp options, if given, otherwise base
9538 the choice on MASK_64BIT in TARGET_DEFAULT. */
9539 if (strcasecmp (cpu_string, "from-abi") == 0)
9540 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9541 : ABI_NEEDS_64BIT_REGS ? 3
9542 : (TARGET_64BIT ? 3 : 1));
9543
9544 /* 'default' has traditionally been a no-op. Probably not very useful. */
9545 if (strcasecmp (cpu_string, "default") == 0)
9546 return 0;
9547
9548 for (p = mips_cpu_info_table; p->name != 0; p++)
9549 if (mips_matching_cpu_name_p (p->name, cpu_string))
9550 return p;
9551
9552 return 0;
9553 }
9554
9555
9556 /* Return the processor associated with the given ISA level, or null
9557 if the ISA isn't valid. */
9558
9559 static const struct mips_cpu_info *
9560 mips_cpu_info_from_isa (int isa)
9561 {
9562 const struct mips_cpu_info *p;
9563
9564 for (p = mips_cpu_info_table; p->name != 0; p++)
9565 if (p->isa == isa)
9566 return p;
9567
9568 return 0;
9569 }
9570 \f
9571 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9572 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9573 they only hold condition code modes, and CCmode is always considered to
9574 be 4 bytes wide. All other registers are word sized. */
9575
9576 unsigned int
9577 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9578 {
9579 if (ST_REG_P (regno))
9580 return ((GET_MODE_SIZE (mode) + 3) / 4);
9581 else if (! FP_REG_P (regno))
9582 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9583 else
9584 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9585 }
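
/* For example, with 32-bit FPRs (UNITS_PER_FPREG == 4) a DFmode value
   occupies two FP registers, whereas with 64-bit FPRs it occupies one;
   a CCmode value in a status register always counts as a single
   register; and a DImode value needs two GPRs on a 32-bit target but
   only one on a 64-bit target.  */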
9586
9587 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9588 all BLKmode objects are returned in memory. Under the new (N32 and
9589 64-bit MIPS ABIs) small structures are returned in a register.
9590 Objects with varying size must still be returned in memory, of
9591 course. */
9592
9593 static bool
9594 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9595 {
9596 if (TARGET_OLDABI)
9597 return (TYPE_MODE (type) == BLKmode);
9598 else
9599 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9600 || (int_size_in_bytes (type) == -1));
9601 }
9602
9603 static bool
9604 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9605 {
9606 return !TARGET_OLDABI;
9607 }
9608 \f
9609 /* Return true if INSN is a multiply-add or multiply-subtract
9610 instruction and PREV assigns to the accumulator operand. */
9611
9612 bool
9613 mips_linked_madd_p (rtx prev, rtx insn)
9614 {
9615 rtx x;
9616
9617 x = single_set (insn);
9618 if (x == 0)
9619 return false;
9620
9621 x = SET_SRC (x);
9622
9623 if (GET_CODE (x) == PLUS
9624 && GET_CODE (XEXP (x, 0)) == MULT
9625 && reg_set_p (XEXP (x, 1), prev))
9626 return true;
9627
9628 if (GET_CODE (x) == MINUS
9629 && GET_CODE (XEXP (x, 1)) == MULT
9630 && reg_set_p (XEXP (x, 0), prev))
9631 return true;
9632
9633 return false;
9634 }
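
/* A sketch of the pattern being matched: if PREV sets the accumulator
   and INSN is

	(set (reg LO) (plus (mult (reg $4) (reg $5)) (reg LO)))

   then INSN is "linked" to PREV, because INSN's addend is the value
   PREV produced.  TUNE_MACC_CHAINS uses this to keep madd/msub chains
   issuing back to back.  */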
9635 \f
9636 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9637 that may clobber hi or lo. */
9638
9639 static rtx mips_macc_chains_last_hilo;
9640
9641 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9642 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9643
9644 static void
9645 mips_macc_chains_record (rtx insn)
9646 {
9647 if (get_attr_may_clobber_hilo (insn))
9648 mips_macc_chains_last_hilo = insn;
9649 }
9650
9651 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9652 has NREADY elements, looking for a multiply-add or multiply-subtract
9653 instruction that is cumulative with mips_macc_chains_last_hilo.
9654 If there is one, promote it ahead of anything else that might
9655 clobber hi or lo. */
9656
9657 static void
9658 mips_macc_chains_reorder (rtx *ready, int nready)
9659 {
9660 int i, j;
9661
9662 if (mips_macc_chains_last_hilo != 0)
9663 for (i = nready - 1; i >= 0; i--)
9664 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9665 {
9666 for (j = nready - 1; j > i; j--)
9667 if (recog_memoized (ready[j]) >= 0
9668 && get_attr_may_clobber_hilo (ready[j]))
9669 {
9670 mips_promote_ready (ready, i, j);
9671 break;
9672 }
9673 break;
9674 }
9675 }
9676 \f
9677 /* The last instruction to be scheduled. */
9678
9679 static rtx vr4130_last_insn;
9680
9681 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9682 points to an rtx that is initially an instruction. Nullify the rtx
9683 if the instruction uses the value of register X. */
9684
9685 static void
9686 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9687 {
9688 rtx *insn_ptr = data;
9689 if (REG_P (x)
9690 && *insn_ptr != 0
9691 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9692 *insn_ptr = 0;
9693 }
9694
9695 /* Return true if there is true register dependence between vr4130_last_insn
9696 and INSN. */
9697
9698 static bool
9699 vr4130_true_reg_dependence_p (rtx insn)
9700 {
9701 note_stores (PATTERN (vr4130_last_insn),
9702 vr4130_true_reg_dependence_p_1, &insn);
9703 return insn == 0;
9704 }
9705
9706 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9707 the ready queue and that INSN2 is the instruction after it, return
9708 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9709 in which INSN1 and INSN2 can probably issue in parallel, but for
9710 which (INSN2, INSN1) should be less sensitive to instruction
9711 alignment than (INSN1, INSN2). See 4130.md for more details. */
9712
9713 static bool
9714 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9715 {
9716 rtx dep;
9717
9718 /* Check for the following case:
9719
9720 1) there is some other instruction X with an anti dependence on INSN1;
9721 2) X has a higher priority than INSN2; and
9722 3) X is an arithmetic instruction (and thus has no unit restrictions).
9723
9724 If INSN1 is the last instruction blocking X, it would be better to
9725 choose (INSN1, X) over (INSN2, INSN1). */
9726 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9727 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9728 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9729 && recog_memoized (XEXP (dep, 0)) >= 0
9730 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9731 return false;
9732
9733 if (vr4130_last_insn != 0
9734 && recog_memoized (insn1) >= 0
9735 && recog_memoized (insn2) >= 0)
9736 {
9737 /* See whether INSN1 and INSN2 use different execution units,
9738 or if they are both ALU-type instructions. If so, they can
9739 probably execute in parallel. */
9740 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9741 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9742 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9743 {
9744 /* If only one of the instructions has a dependence on
9745 vr4130_last_insn, prefer to schedule the other one first. */
9746 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9747 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9748 if (dep1 != dep2)
9749 return dep1;
9750
9751 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9752 is not an ALU-type instruction and if INSN1 uses the same
9753 execution unit. (Note that if this condition holds, we already
9754 know that INSN2 uses a different execution unit.) */
9755 if (class1 != VR4130_CLASS_ALU
9756 && recog_memoized (vr4130_last_insn) >= 0
9757 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9758 return true;
9759 }
9760 }
9761 return false;
9762 }
9763
9764 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9765 queue with at least two instructions. Swap the first two if
9766 vr4130_swap_insns_p says that it could be worthwhile. */
9767
9768 static void
9769 vr4130_reorder (rtx *ready, int nready)
9770 {
9771 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9772 mips_promote_ready (ready, nready - 2, nready - 1);
9773 }
9774 \f
9775 /* Remove the instruction at index LOWER from ready queue READY and
9776 reinsert it in front of the instruction at index HIGHER. LOWER must
9777 be <= HIGHER. */
9778
9779 static void
9780 mips_promote_ready (rtx *ready, int lower, int higher)
9781 {
9782 rtx new_head;
9783 int i;
9784
9785 new_head = ready[lower];
9786 for (i = lower; i < higher; i++)
9787 ready[i] = ready[i + 1];
9788 ready[i] = new_head;
9789 }
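
/* A small example: the ready queue keeps the next instruction to
   issue at the highest index, so with READY == {A, B, C, D},
   mips_promote_ready (ready, 1, 3) gives {A, C, D, B}; B becomes the
   new head while the relative order of the others is preserved.  */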
9790
9791 /* Implement TARGET_SCHED_REORDER. */
9792
9793 static int
9794 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9795 rtx *ready, int *nreadyp, int cycle)
9796 {
9797 if (!reload_completed && TUNE_MACC_CHAINS)
9798 {
9799 if (cycle == 0)
9800 mips_macc_chains_last_hilo = 0;
9801 if (*nreadyp > 0)
9802 mips_macc_chains_reorder (ready, *nreadyp);
9803 }
9804 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9805 {
9806 if (cycle == 0)
9807 vr4130_last_insn = 0;
9808 if (*nreadyp > 1)
9809 vr4130_reorder (ready, *nreadyp);
9810 }
9811 return mips_issue_rate ();
9812 }
9813
9814 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9815
9816 static int
9817 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9818 rtx insn, int more)
9819 {
9820 switch (GET_CODE (PATTERN (insn)))
9821 {
9822 case USE:
9823 case CLOBBER:
9824 /* Don't count USEs and CLOBBERs against the issue rate. */
9825 break;
9826
9827 default:
9828 more--;
9829 if (!reload_completed && TUNE_MACC_CHAINS)
9830 mips_macc_chains_record (insn);
9831 vr4130_last_insn = insn;
9832 break;
9833 }
9834 return more;
9835 }
9836 \f
9837 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9838 dependencies have no cost. */
9839
9840 static int
9841 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9842 rtx dep ATTRIBUTE_UNUSED, int cost)
9843 {
9844 if (REG_NOTE_KIND (link) != 0)
9845 return 0;
9846 return cost;
9847 }
9848
9849 /* Return the number of instructions that can be issued per cycle. */
9850
9851 static int
9852 mips_issue_rate (void)
9853 {
9854 switch (mips_tune)
9855 {
9856 case PROCESSOR_R4130:
9857 case PROCESSOR_R5400:
9858 case PROCESSOR_R5500:
9859 case PROCESSOR_R7000:
9860 case PROCESSOR_R9000:
9861 return 2;
9862
9863 case PROCESSOR_SB1:
9864 /* This is actually 4, but we get better performance if we claim 3.
9865 This is partly because of unwanted speculative code motion with the
9866 larger number, and partly because in most common cases we can't
9867 reach the theoretical max of 4. */
9868 return 3;
9869
9870 default:
9871 return 1;
9872 }
9873 }
9874
9875 /* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9876 be as wide as the scheduling freedom in the DFA. */
9877
9878 static int
9879 mips_multipass_dfa_lookahead (void)
9880 {
9881 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9882 if (mips_tune == PROCESSOR_SB1)
9883 return 4;
9884
9885 return 0;
9886 }
9887 \f
9888 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9889 return the first operand of the associated "pref" or "prefx" insn. */
9890
9891 rtx
9892 mips_prefetch_cookie (rtx write, rtx locality)
9893 {
9894 /* store_streamed / load_streamed. */
9895 if (INTVAL (locality) <= 0)
9896 return GEN_INT (INTVAL (write) + 4);
9897
9898 /* store / load. */
9899 if (INTVAL (locality) <= 2)
9900 return write;
9901
9902 /* store_retained / load_retained. */
9903 return GEN_INT (INTVAL (write) + 6);
9904 }
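
/* The resulting values match the MIPS pref hint encoding (WRITE is 0
   for loads and 1 for stores):

	locality 0     -> hint 4 or 5	(load_streamed / store_streamed)
	locality 1, 2  -> hint 0 or 1	(load / store)
	locality 3     -> hint 6 or 7	(load_retained / store_retained)  */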
9905 \f
9906 /* MIPS builtin function support. */
9907
9908 struct builtin_description
9909 {
9910 /* The code of the main .md file instruction. See mips_builtin_type
9911 for more information. */
9912 enum insn_code icode;
9913
9914 /* The floating-point comparison code to use with ICODE, if any. */
9915 enum mips_fp_condition cond;
9916
9917 /* The name of the builtin function. */
9918 const char *name;
9919
9920 /* Specifies how the function should be expanded. */
9921 enum mips_builtin_type builtin_type;
9922
9923 /* The function's prototype. */
9924 enum mips_function_type function_type;
9925
9926 /* The target flags required for this function. */
9927 int target_flags;
9928 };
9929
9930 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9931 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9932 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9933 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9934 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
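
/* For example, DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF,
   MASK_PAIRED_SINGLE_FLOAT) expands to the initializer

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }

   i.e. a builtin that simply expands to the mips_pll_ps pattern.  */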
9935
9936 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9937 TARGET_FLAGS. */
9938 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9939 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9940 "__builtin_mips_" #INSN "_" #COND "_s", \
9941 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9942 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9943 "__builtin_mips_" #INSN "_" #COND "_d", \
9944 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9945
9946 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9947 The lower and upper forms require TARGET_FLAGS while the any and all
9948 forms require MASK_MIPS3D. */
9949 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9950 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9951 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9952 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9953 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9954 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9955 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9956 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9957 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9958 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9959 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9960 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9961 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9962
9963 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9964 require MASK_MIPS3D. */
9965 #define CMP_4S_BUILTINS(INSN, COND) \
9966 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9967 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9968 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9969 MASK_MIPS3D }, \
9970 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9971 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9972 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9973 MASK_MIPS3D }
9974
9975 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9976 instruction requires TARGET_FLAGS. */
9977 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9978 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9979 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9980 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9981 TARGET_FLAGS }, \
9982 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9983 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9984 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9985 TARGET_FLAGS }
9986
9987 /* Define all the builtins related to c.cond.fmt condition COND. */
9988 #define CMP_BUILTINS(COND) \
9989 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9990 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9991 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9992 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9993 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9994 CMP_4S_BUILTINS (c, COND), \
9995 CMP_4S_BUILTINS (cabs, COND)
9996
9997 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9998 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
9999
10000 static const struct builtin_description mips_bdesc[] =
10001 {
10002 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10003 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10004 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10005 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10006 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10007 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10008 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10009 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10010
10011 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10012 MASK_PAIRED_SINGLE_FLOAT),
10013 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10014 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10015 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10016 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10017
10018 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10019 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10020 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10021 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10022 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10023 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10024
10025 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10026 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10027 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10028 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10029 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10030 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10031
10032 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10033 };
10034
10035 /* Builtin functions for the SB-1 processor. */
10036
10037 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10038
10039 static const struct builtin_description sb1_bdesc[] =
10040 {
10041 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10042 };
10043
10044 /* Builtin functions for DSP ASE. */
10045
10046 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10047 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10048 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10049 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10050
10051 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10052 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10053 builtin_description fields. */
10054 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10055 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10056 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10057
10058 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10059 branch instruction. TARGET_FLAGS is a builtin_description field. */
10060 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10061 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10062 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10063
10064 static const struct builtin_description dsp_bdesc[] =
10065 {
10066 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10067 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10068 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10069 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10070 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10071 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10072 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10073 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10074 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10075 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10076 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10077 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10078 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10079 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10080 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10081 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10082 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10083 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10084 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10085 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10086 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10087 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10088 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10089 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10090 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10091 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10092 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10093 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10094 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10095 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10096 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10097 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10098 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10099 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10100 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10101 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10102 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10103 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10104 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10105 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10106 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10107 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10108 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10109 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10110 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10111 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10112 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10113 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10114 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10115 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10116 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10117 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10118 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10119 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10120 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10121 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10122 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10123 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10124 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10125 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10126 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10127 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10128 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10129 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10130 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10131 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10132 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10133 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10134 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10135 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10136 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10137 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10138 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10139 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10140 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10141 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10142 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10143 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10144 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10145 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10146 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10147 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10148 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10149 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10150 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10151 BPOSGE_BUILTIN (32, MASK_DSP)
10152 };
10153
10154 /* This helps provide a mapping from builtin function codes to bdesc
10155 arrays. */
10156
10157 struct bdesc_map
10158 {
10159 /* The builtin function table that this entry describes. */
10160 const struct builtin_description *bdesc;
10161
10162 /* The number of entries in the builtin function table. */
10163 unsigned int size;
10164
10165 /* The target processor that supports these builtin functions.
10166 PROCESSOR_MAX means we enable them for all processors. */
10167 enum processor_type proc;
10168 };
10169
10170 static const struct bdesc_map bdesc_arrays[] =
10171 {
10172 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
10173 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
10174 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
10175 };
10176
10177 /* Take the head of argument list *ARGLIST and convert it into a form
10178 suitable for input operand OP of instruction ICODE. Return the value
10179 and point *ARGLIST at the next element of the list. */
10180
10181 static rtx
10182 mips_prepare_builtin_arg (enum insn_code icode,
10183 unsigned int op, tree *arglist)
10184 {
10185 rtx value;
10186 enum machine_mode mode;
10187
10188 value = expand_normal (TREE_VALUE (*arglist));
10189 mode = insn_data[icode].operand[op].mode;
10190 if (!insn_data[icode].operand[op].predicate (value, mode))
10191 {
10192 value = copy_to_mode_reg (mode, value);
10193 /* Check the predicate again. */
10194 if (!insn_data[icode].operand[op].predicate (value, mode))
10195 {
10196 error ("invalid argument to builtin function");
10197 return const0_rtx;
10198 }
10199 }
10200
10201 *arglist = TREE_CHAIN (*arglist);
10202 return value;
10203 }
10204
10205 /* Return an rtx suitable for output operand OP of instruction ICODE.
10206 If TARGET is non-null, try to use it where possible. */
10207
10208 static rtx
10209 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10210 {
10211 enum machine_mode mode;
10212
10213 mode = insn_data[icode].operand[op].mode;
10214 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10215 target = gen_reg_rtx (mode);
10216
10217 return target;
10218 }
10219
10220 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10221
10222 rtx
10223 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10224 enum machine_mode mode ATTRIBUTE_UNUSED,
10225 int ignore ATTRIBUTE_UNUSED)
10226 {
10227 enum insn_code icode;
10228 enum mips_builtin_type type;
10229 tree fndecl, arglist;
10230 unsigned int fcode;
10231 const struct builtin_description *bdesc;
10232 const struct bdesc_map *m;
10233
10234 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
10235 arglist = TREE_OPERAND (exp, 1);
10236 fcode = DECL_FUNCTION_CODE (fndecl);
10237
10238 bdesc = NULL;
10239 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10240 {
10241 if (fcode < m->size)
10242 {
10243 bdesc = m->bdesc;
10244 icode = bdesc[fcode].icode;
10245 type = bdesc[fcode].builtin_type;
10246 break;
10247 }
10248 fcode -= m->size;
10249 }
10250 if (bdesc == NULL)
10251 return 0;
10252
10253 switch (type)
10254 {
10255 case MIPS_BUILTIN_DIRECT:
10256 return mips_expand_builtin_direct (icode, target, arglist, true);
10257
10258 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10259 return mips_expand_builtin_direct (icode, target, arglist, false);
10260
10261 case MIPS_BUILTIN_MOVT:
10262 case MIPS_BUILTIN_MOVF:
10263 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10264 target, arglist);
10265
10266 case MIPS_BUILTIN_CMP_ANY:
10267 case MIPS_BUILTIN_CMP_ALL:
10268 case MIPS_BUILTIN_CMP_UPPER:
10269 case MIPS_BUILTIN_CMP_LOWER:
10270 case MIPS_BUILTIN_CMP_SINGLE:
10271 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10272 target, arglist);
10273
10274 case MIPS_BUILTIN_BPOSGE32:
10275 return mips_expand_builtin_bposge (type, target);
10276
10277 default:
10278 return 0;
10279 }
10280 }
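
/* For example, given the ordering in bdesc_arrays, function codes
   0 .. ARRAY_SIZE (mips_bdesc) - 1 select entries of mips_bdesc
   directly, the next code selects entry 0 of sb1_bdesc, and later
   codes fall into dsp_bdesc; the loop above subtracts each table's
   size in turn until FCODE indexes the right table.  */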
10281
10282 /* Initialize builtin functions. This is called from TARGET_INIT_BUILTINS. */
10283
10284 void
10285 mips_init_builtins (void)
10286 {
10287 const struct builtin_description *d;
10288 const struct bdesc_map *m;
10289 tree types[(int) MIPS_MAX_FTYPE_MAX];
10290 tree V2SF_type_node;
10291 tree V2HI_type_node;
10292 tree V4QI_type_node;
10293 unsigned int offset;
10294
10295 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
10296 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10297 return;
10298
10299 if (TARGET_PAIRED_SINGLE_FLOAT)
10300 {
10301 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10302
10303 types[MIPS_V2SF_FTYPE_V2SF]
10304 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10305
10306 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10307 = build_function_type_list (V2SF_type_node,
10308 V2SF_type_node, V2SF_type_node, NULL_TREE);
10309
10310 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10311 = build_function_type_list (V2SF_type_node,
10312 V2SF_type_node, V2SF_type_node,
10313 integer_type_node, NULL_TREE);
10314
10315 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10316 = build_function_type_list (V2SF_type_node,
10317 V2SF_type_node, V2SF_type_node,
10318 V2SF_type_node, V2SF_type_node, NULL_TREE);
10319
10320 types[MIPS_V2SF_FTYPE_SF_SF]
10321 = build_function_type_list (V2SF_type_node,
10322 float_type_node, float_type_node, NULL_TREE);
10323
10324 types[MIPS_INT_FTYPE_V2SF_V2SF]
10325 = build_function_type_list (integer_type_node,
10326 V2SF_type_node, V2SF_type_node, NULL_TREE);
10327
10328 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10329 = build_function_type_list (integer_type_node,
10330 V2SF_type_node, V2SF_type_node,
10331 V2SF_type_node, V2SF_type_node, NULL_TREE);
10332
10333 types[MIPS_INT_FTYPE_SF_SF]
10334 = build_function_type_list (integer_type_node,
10335 float_type_node, float_type_node, NULL_TREE);
10336
10337 types[MIPS_INT_FTYPE_DF_DF]
10338 = build_function_type_list (integer_type_node,
10339 double_type_node, double_type_node, NULL_TREE);
10340
10341 types[MIPS_SF_FTYPE_V2SF]
10342 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10343
10344 types[MIPS_SF_FTYPE_SF]
10345 = build_function_type_list (float_type_node,
10346 float_type_node, NULL_TREE);
10347
10348 types[MIPS_SF_FTYPE_SF_SF]
10349 = build_function_type_list (float_type_node,
10350 float_type_node, float_type_node, NULL_TREE);
10351
10352 types[MIPS_DF_FTYPE_DF]
10353 = build_function_type_list (double_type_node,
10354 double_type_node, NULL_TREE);
10355
10356 types[MIPS_DF_FTYPE_DF_DF]
10357 = build_function_type_list (double_type_node,
10358 double_type_node, double_type_node, NULL_TREE);
10359 }
10360
10361 if (TARGET_DSP)
10362 {
10363 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10364 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10365
10366 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10367 = build_function_type_list (V2HI_type_node,
10368 V2HI_type_node, V2HI_type_node,
10369 NULL_TREE);
10370
10371 types[MIPS_SI_FTYPE_SI_SI]
10372 = build_function_type_list (intSI_type_node,
10373 intSI_type_node, intSI_type_node,
10374 NULL_TREE);
10375
10376 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10377 = build_function_type_list (V4QI_type_node,
10378 V4QI_type_node, V4QI_type_node,
10379 NULL_TREE);
10380
10381 types[MIPS_SI_FTYPE_V4QI]
10382 = build_function_type_list (intSI_type_node,
10383 V4QI_type_node,
10384 NULL_TREE);
10385
10386 types[MIPS_V2HI_FTYPE_V2HI]
10387 = build_function_type_list (V2HI_type_node,
10388 V2HI_type_node,
10389 NULL_TREE);
10390
10391 types[MIPS_SI_FTYPE_SI]
10392 = build_function_type_list (intSI_type_node,
10393 intSI_type_node,
10394 NULL_TREE);
10395
10396 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10397 = build_function_type_list (V4QI_type_node,
10398 V2HI_type_node, V2HI_type_node,
10399 NULL_TREE);
10400
10401 types[MIPS_V2HI_FTYPE_SI_SI]
10402 = build_function_type_list (V2HI_type_node,
10403 intSI_type_node, intSI_type_node,
10404 NULL_TREE);
10405
10406 types[MIPS_SI_FTYPE_V2HI]
10407 = build_function_type_list (intSI_type_node,
10408 V2HI_type_node,
10409 NULL_TREE);
10410
10411 types[MIPS_V2HI_FTYPE_V4QI]
10412 = build_function_type_list (V2HI_type_node,
10413 V4QI_type_node,
10414 NULL_TREE);
10415
10416 types[MIPS_V4QI_FTYPE_V4QI_SI]
10417 = build_function_type_list (V4QI_type_node,
10418 V4QI_type_node, intSI_type_node,
10419 NULL_TREE);
10420
10421 types[MIPS_V2HI_FTYPE_V2HI_SI]
10422 = build_function_type_list (V2HI_type_node,
10423 V2HI_type_node, intSI_type_node,
10424 NULL_TREE);
10425
10426 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10427 = build_function_type_list (V2HI_type_node,
10428 V4QI_type_node, V2HI_type_node,
10429 NULL_TREE);
10430
10431 types[MIPS_SI_FTYPE_V2HI_V2HI]
10432 = build_function_type_list (intSI_type_node,
10433 V2HI_type_node, V2HI_type_node,
10434 NULL_TREE);
10435
10436 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10437 = build_function_type_list (intDI_type_node,
10438 intDI_type_node, V4QI_type_node, V4QI_type_node,
10439 NULL_TREE);
10440
10441 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10442 = build_function_type_list (intDI_type_node,
10443 intDI_type_node, V2HI_type_node, V2HI_type_node,
10444 NULL_TREE);
10445
10446 types[MIPS_DI_FTYPE_DI_SI_SI]
10447 = build_function_type_list (intDI_type_node,
10448 intDI_type_node, intSI_type_node, intSI_type_node,
10449 NULL_TREE);
10450
10451 types[MIPS_V4QI_FTYPE_SI]
10452 = build_function_type_list (V4QI_type_node,
10453 intSI_type_node,
10454 NULL_TREE);
10455
10456 types[MIPS_V2HI_FTYPE_SI]
10457 = build_function_type_list (V2HI_type_node,
10458 intSI_type_node,
10459 NULL_TREE);
10460
10461 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10462 = build_function_type_list (void_type_node,
10463 V4QI_type_node, V4QI_type_node,
10464 NULL_TREE);
10465
10466 types[MIPS_SI_FTYPE_V4QI_V4QI]
10467 = build_function_type_list (intSI_type_node,
10468 V4QI_type_node, V4QI_type_node,
10469 NULL_TREE);
10470
10471 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10472 = build_function_type_list (void_type_node,
10473 V2HI_type_node, V2HI_type_node,
10474 NULL_TREE);
10475
10476 types[MIPS_SI_FTYPE_DI_SI]
10477 = build_function_type_list (intSI_type_node,
10478 intDI_type_node, intSI_type_node,
10479 NULL_TREE);
10480
10481 types[MIPS_DI_FTYPE_DI_SI]
10482 = build_function_type_list (intDI_type_node,
10483 intDI_type_node, intSI_type_node,
10484 NULL_TREE);
10485
10486 types[MIPS_VOID_FTYPE_SI_SI]
10487 = build_function_type_list (void_type_node,
10488 intSI_type_node, intSI_type_node,
10489 NULL_TREE);
10490
10491 types[MIPS_SI_FTYPE_PTR_SI]
10492 = build_function_type_list (intSI_type_node,
10493 ptr_type_node, intSI_type_node,
10494 NULL_TREE);
10495
10496 types[MIPS_SI_FTYPE_VOID]
10497 = build_function_type (intSI_type_node, void_list_node);
10498 }
10499
10500 /* Iterate through all of the bdesc arrays, initializing all of the
10501 builtin functions. */
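/* Note that OFFSET is added to each builtin's index within its own array,
so the function codes passed to lang_hooks.builtin_function stay distinct
across all of the bdesc arrays. */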
10502
10503 offset = 0;
10504 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10505 {
10506 if (m->proc == PROCESSOR_MAX || m->proc == mips_arch)
10507 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10508 if ((d->target_flags & target_flags) == d->target_flags)
10509 lang_hooks.builtin_function (d->name, types[d->function_type],
10510 d - m->bdesc + offset,
10511 BUILT_IN_MD, NULL, NULL);
10512 offset += m->size;
10513 }
10514 }
10515
10516 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10517 .md pattern and ARGLIST is the list of function arguments. TARGET,
10518 if nonnull, suggests a good place to put the result.
10519 HAS_TARGET is true if the builtin returns a value, in which case operand 0 of ICODE is used as the destination. */
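/* For illustration (a sketch of the general case, not of any one .md
entry): a two-argument DSP builtin such as __builtin_mips_addq_ph arrives
here with the icode of its addq.ph pattern; the loop below collects the
destination and the two arguments, and the "case 3" arm emits the insn. */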
10520
10521 static rtx
10522 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
10523 bool has_target)
10524 {
10525 rtx ops[MAX_RECOG_OPERANDS];
10526 int i = 0;
10527
10528 if (has_target)
10529 {
10530 /* Operand 0 of ICODE is the destination, so save TARGET in ops[0]. */
10531 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10532 i = 1;
10533 }
10534
10535 /* Stop when ARGLIST runs out as well as when every operand is filled;
10536 some instructions have extra clobber operands that take no argument. */
10537 for (; i < insn_data[icode].n_operands && arglist != 0; i++)
10538 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10539
10540 switch (i)
10541 {
10542 case 2:
10543 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10544 break;
10545
10546 case 3:
10547 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10548 break;
10549
10550 case 4:
10551 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10552 break;
10553
10554 default:
10555 gcc_unreachable ();
10556 }
10557 return target;
10558 }
10559
10560 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10561 function (TYPE says which). ARGLIST is the list of arguments to the
10562 function, ICODE is the instruction that should be used to compare
10563 the first two arguments, and COND is the condition it should test.
10564 TARGET, if nonnull, suggests a good place to put the result. */
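/* For example (illustrative; see the GCC manual's paired-single section
for the exact names and semantics): a call along the lines of
__builtin_mips_movt_c_eq_ps (a, b, c, d) first emits a c.eq.ps comparison
of A and B and then a movt.ps-style conditional move that selects between
C and D according to the resulting condition codes. */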
10565
10566 static rtx
10567 mips_expand_builtin_movtf (enum mips_builtin_type type,
10568 enum insn_code icode, enum mips_fp_condition cond,
10569 rtx target, tree arglist)
10570 {
10571 rtx cmp_result, op0, op1;
10572
10573 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10574 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10575 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10576 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10577
10578 icode = CODE_FOR_mips_cond_move_tf_ps;
10579 target = mips_prepare_builtin_target (icode, 0, target);
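/* For MOVT the two value arguments are bound to the pattern's operands in
the opposite order from MOVF; mips_cond_move_tf_ps implements a single
fixed sense of the condition, so the two builtins differ only in which
argument is selected when a condition bit is set. */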
10580 if (type == MIPS_BUILTIN_MOVT)
10581 {
10582 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10583 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10584 }
10585 else
10586 {
10587 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10588 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10589 }
10590 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10591 return target;
10592 }
10593
10594 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10595 into TARGET otherwise. Return TARGET. */
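/* In outline, the emitted sequence corresponds to:

     target = VALUE_IF_FALSE;
     if (CONDITION) goto true_label;
     goto done_label;
   true_label:
     target = VALUE_IF_TRUE;
   done_label:
     ;  */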
10596
10597 static rtx
10598 mips_builtin_branch_and_move (rtx condition, rtx target,
10599 rtx value_if_true, rtx value_if_false)
10600 {
10601 rtx true_label, done_label;
10602
10603 true_label = gen_label_rtx ();
10604 done_label = gen_label_rtx ();
10605
10606 /* First assume that CONDITION is false. */
10607 emit_move_insn (target, value_if_false);
10608
10609 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10610 emit_jump_insn (gen_condjump (condition, true_label));
10611 emit_jump_insn (gen_jump (done_label));
10612 emit_barrier ();
10613
10614 /* Fix TARGET if CONDITION is true. */
10615 emit_label (true_label);
10616 emit_move_insn (target, value_if_true);
10617
10618 emit_label (done_label);
10619 return target;
10620 }
10621
10622 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10623 of the comparison instruction and COND is the condition it should test.
10624 ARGLIST is the list of function arguments and TARGET, if nonnull,
10625 suggests a good place to put the boolean result. */
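/* For illustration (builtin names are hedged; see the GCC manual for the
exact list): this handles builtins along the lines of
__builtin_mips_all_c_eq_ps, which yields 1 only when every condition bit is
set, __builtin_mips_any_c_eq_ps, which yields 1 when at least one bit is
set, and the upper/lower variants, which test a single bit. */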
10626
10627 static rtx
10628 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10629 enum insn_code icode, enum mips_fp_condition cond,
10630 rtx target, tree arglist)
10631 {
10632 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10633 int i;
10634
10635 if (target == 0 || GET_MODE (target) != SImode)
10636 target = gen_reg_rtx (SImode);
10637
10638 /* Prepare the operands to the comparison. */
10639 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10640 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10641 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10642
10643 switch (insn_data[icode].n_operands)
10644 {
10645 case 4:
10646 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10647 break;
10648
10649 case 6:
10650 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10651 ops[3], ops[4], GEN_INT (cond)));
10652 break;
10653
10654 default:
10655 gcc_unreachable ();
10656 }
10657
10658 /* If the comparison sets more than one register, we define the result
10659 to be 0 if all registers are false and -1 if all registers are true.
10660 The value of the complete result is indeterminate otherwise. */
10661 switch (builtin_type)
10662 {
10663 case MIPS_BUILTIN_CMP_ALL:
10664 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10665 return mips_builtin_branch_and_move (condition, target,
10666 const0_rtx, const1_rtx);
10667
10668 case MIPS_BUILTIN_CMP_UPPER:
10669 case MIPS_BUILTIN_CMP_LOWER:
10670 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10671 condition = gen_single_cc (cmp_result, offset);
10672 return mips_builtin_branch_and_move (condition, target,
10673 const1_rtx, const0_rtx);
10674
10675 default:
10676 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10677 return mips_builtin_branch_and_move (condition, target,
10678 const1_rtx, const0_rtx);
10679 }
10680 }
10681
10682 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10683 suggests a good place to put the boolean result. */
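/* For illustration: __builtin_mips_bposge32 () yields 1 when the pos field
of the DSP control register is at least 32 and 0 otherwise, which is the GE
comparison against CCDSP_PO_REGNUM built below. */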
10684
10685 static rtx
10686 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10687 {
10688 rtx condition, cmp_result;
10689 int cmp_value;
10690
10691 if (target == 0 || GET_MODE (target) != SImode)
10692 target = gen_reg_rtx (SImode);
10693
10694 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10695
10696 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10697 cmp_value = 32;
10698 else
10699 gcc_unreachable ();
10700
10701 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10702 return mips_builtin_branch_and_move (condition, target,
10703 const1_rtx, const0_rtx);
10704 }
10705 \f
10706 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10707 FIRST is true if this is the first time handling this decl. */
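/* For example (illustrative): a declaration such as

   void foo (void) __attribute__ ((long_call));

sets SYMBOL_FLAG_LONG_CALL on foo's SYMBOL_REF, telling later passes that
calls to foo may be out of range of a direct jump and should be made
through a register. */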
10708
10709 static void
10710 mips_encode_section_info (tree decl, rtx rtl, int first)
10711 {
10712 default_encode_section_info (decl, rtl, first);
10713
10714 if (TREE_CODE (decl) == FUNCTION_DECL
10715 && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10716 {
10717 rtx symbol = XEXP (rtl, 0);
10718 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10719 }
10720 }
10721
10722 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. PIC_FUNCTION_ADDR_REGNUM is live
10723 on entry to a function when generating -mshared abicalls code. */
10724
10725 static void
10726 mips_extra_live_on_entry (bitmap regs)
10727 {
10728 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
10729 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10730 }
10731
10732 /* Implement TARGET_MODE_REP_EXTENDED. On 64-bit targets, SImode values are represented as sign-extended to DImode. */
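/* For instance (illustrative): on a 64-bit target the SImode value
0x80000000 is held in a register as 0xffffffff80000000, so passes that
honor this invariant can omit sign extensions that would otherwise be
needed when widening. */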
10733
10734 int
10735 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10736 {
10737 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10738 return SIGN_EXTEND;
10739
10740 return UNKNOWN;
10741 }
10742 \f
10743 #include "gt-mips.h"